Diffstat (limited to 'vendor/github.com/prometheus/common')
-rw-r--r--  vendor/github.com/prometheus/common/.travis.yml | 7
-rw-r--r--  vendor/github.com/prometheus/common/AUTHORS.md | 11
-rw-r--r--  vendor/github.com/prometheus/common/CONTRIBUTING.md | 18
-rw-r--r--  vendor/github.com/prometheus/common/LICENSE | 201
-rw-r--r--  vendor/github.com/prometheus/common/NOTICE | 5
-rw-r--r--  vendor/github.com/prometheus/common/README.md | 12
-rw-r--r--  vendor/github.com/prometheus/common/config/config.go | 30
-rw-r--r--  vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml | 1
-rw-r--r--  vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml | 0
-rw-r--r--  vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml | 1
-rw-r--r--  vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml | 1
-rw-r--r--  vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml | 1
-rw-r--r--  vendor/github.com/prometheus/common/config/tls_config.go | 79
-rw-r--r--  vendor/github.com/prometheus/common/config/tls_config_test.go | 92
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/bench_test.go | 167
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/decode.go | 412
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/decode_test.go | 367
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/encode.go | 88
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/expfmt.go | 37
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz.go | 36
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 | 2
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 | 6
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 | 12
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 | 22
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 | 10
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 | 3
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 | 3
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 | 3
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 | 2
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 | 2
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 | 3
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 | 3
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 | 3
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal | 1
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/testdata/json2 | 46
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/testdata/json2_bad | 46
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/testdata/protobuf | bin 0 -> 8239 bytes
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz | bin 0 -> 2097 bytes
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/testdata/text | 322
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/testdata/text.gz | bin 0 -> 2598 bytes
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_create.go | 303
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_create_test.go | 443
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse.go | 753
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse_test.go | 588
-rw-r--r--  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt | 67
-rw-r--r--  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go | 162
-rw-r--r--  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go | 33
-rw-r--r--  vendor/github.com/prometheus/common/log/eventlog_formatter.go | 89
-rw-r--r--  vendor/github.com/prometheus/common/log/log.go | 365
-rw-r--r--  vendor/github.com/prometheus/common/log/log_test.go | 39
-rw-r--r--  vendor/github.com/prometheus/common/log/syslog_formatter.go | 119
-rw-r--r--  vendor/github.com/prometheus/common/model/alert.go | 136
-rw-r--r--  vendor/github.com/prometheus/common/model/alert_test.go | 118
-rw-r--r--  vendor/github.com/prometheus/common/model/fingerprinting.go | 105
-rw-r--r--  vendor/github.com/prometheus/common/model/fnv.go | 42
-rw-r--r--  vendor/github.com/prometheus/common/model/labels.go | 210
-rw-r--r--  vendor/github.com/prometheus/common/model/labels_test.go | 140
-rw-r--r--  vendor/github.com/prometheus/common/model/labelset.go | 169
-rw-r--r--  vendor/github.com/prometheus/common/model/metric.go | 103
-rw-r--r--  vendor/github.com/prometheus/common/model/metric_test.go | 132
-rw-r--r--  vendor/github.com/prometheus/common/model/model.go | 16
-rw-r--r--  vendor/github.com/prometheus/common/model/signature.go | 144
-rw-r--r--  vendor/github.com/prometheus/common/model/signature_test.go | 314
-rw-r--r--  vendor/github.com/prometheus/common/model/silence.go | 106
-rw-r--r--  vendor/github.com/prometheus/common/model/silence_test.go | 228
-rw-r--r--  vendor/github.com/prometheus/common/model/time.go | 249
-rw-r--r--  vendor/github.com/prometheus/common/model/time_test.go | 129
-rw-r--r--  vendor/github.com/prometheus/common/model/value.go | 419
-rw-r--r--  vendor/github.com/prometheus/common/model/value_test.go | 417
-rw-r--r--  vendor/github.com/prometheus/common/route/route.go | 137
-rw-r--r--  vendor/github.com/prometheus/common/route/route_test.go | 75
-rw-r--r--  vendor/github.com/prometheus/common/version/info.go | 89
84 files changed, 8506 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml
new file mode 100644
index 000000000..69b2431c8
--- /dev/null
+++ b/vendor/github.com/prometheus/common/.travis.yml
@@ -0,0 +1,7 @@
+sudo: false
+
+language: go
+go:
+ - 1.5.4
+ - 1.6.2
+ - tip
diff --git a/vendor/github.com/prometheus/common/AUTHORS.md b/vendor/github.com/prometheus/common/AUTHORS.md
new file mode 100644
index 000000000..c63f4d395
--- /dev/null
+++ b/vendor/github.com/prometheus/common/AUTHORS.md
@@ -0,0 +1,11 @@
+Maintainers of this repository:
+
+* Fabian Reinartz <fabian@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Björn Rabenstein <beorn@soundcloud.com>
+* Fabian Reinartz <fabian@soundcloud.com>
+* Julius Volz <julius.volz@gmail.com>
+* Miguel Molina <hi@mvader.me>
diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md
new file mode 100644
index 000000000..5705f0fbe
--- /dev/null
+++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 000000000..636a2c1a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md
new file mode 100644
index 000000000..98f6ce24b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/README.md
@@ -0,0 +1,12 @@
+# Common
+[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common)
+
+This repository contains Go libraries that are shared across Prometheus
+components and libraries.
+
+* **config**: Common configuration structures
+* **expfmt**: Decoding and encoding for the exposition format
+* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus)
+* **model**: Shared data structures
+* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
+* **version**: Version information and metrics
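
Of the packages listed above, expfmt is the one most callers touch first. As a rough usage sketch (not part of the vendored sources; the sample input and program below are assumptions made for illustration), parsing a text-format exposition with its TextParser looks roughly like this:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// A tiny text-format exposition; real input would come from a scrape.
	in := strings.NewReader("# TYPE requests_total counter\nrequests_total 42\n")

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(in)
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
	}
}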
diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go
new file mode 100644
index 000000000..33eb922ce
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/config.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "fmt"
+ "strings"
+)
+
+func checkOverflow(m map[string]interface{}, ctx string) error {
+ if len(m) > 0 {
+ var keys []string
+ for k := range m {
+ keys = append(keys, k)
+ }
+ return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", "))
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml
new file mode 100644
index 000000000..7dfdc1ead
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml
@@ -0,0 +1 @@
+cert_file: somefile
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml
new file mode 100644
index 000000000..d054383f1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml
@@ -0,0 +1 @@
+insecure_skip_verify: true
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml
new file mode 100644
index 000000000..12cbaac3b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml
@@ -0,0 +1 @@
+something_invalid: true
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml
new file mode 100644
index 000000000..cec045e89
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml
@@ -0,0 +1 @@
+key_file: somefile
diff --git a/vendor/github.com/prometheus/common/config/tls_config.go b/vendor/github.com/prometheus/common/config/tls_config.go
new file mode 100644
index 000000000..7c7e7cb02
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/tls_config.go
@@ -0,0 +1,79 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+)
+
+// TLSConfig configures the options for TLS connections.
+type TLSConfig struct {
+ // The CA cert to use for the targets.
+ CAFile string `yaml:"ca_file,omitempty"`
+ // The client cert file for the targets.
+ CertFile string `yaml:"cert_file,omitempty"`
+ // The client key file for the targets.
+ KeyFile string `yaml:"key_file,omitempty"`
+ // Disable target certificate validation.
+ InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
+
+ // Catches all undefined fields and must be empty after parsing.
+ XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ type plain TLSConfig
+ if err := unmarshal((*plain)(c)); err != nil {
+ return err
+ }
+ return checkOverflow(c.XXX, "TLS config")
+}
+
+// GenerateConfig produces a tls.Config based on TLS connection options.
+// It loads certificate files from disk if they are defined.
+func (c *TLSConfig) GenerateConfig() (*tls.Config, error) {
+ tlsConfig := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify}
+
+ // If a CA cert is provided then let's read it in so we can validate the
+ // scrape target's certificate properly.
+ if len(c.CAFile) > 0 {
+ caCertPool := x509.NewCertPool()
+ // Load CA cert.
+ caCert, err := ioutil.ReadFile(c.CAFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to use specified CA cert %s: %s", c.CAFile, err)
+ }
+ caCertPool.AppendCertsFromPEM(caCert)
+ tlsConfig.RootCAs = caCertPool
+ }
+
+ if len(c.CertFile) > 0 && len(c.KeyFile) == 0 {
+ return nil, fmt.Errorf("client cert file %q specified without client key file", c.CertFile)
+ } else if len(c.KeyFile) > 0 && len(c.CertFile) == 0 {
+ return nil, fmt.Errorf("client key file %q specified without client cert file", c.KeyFile)
+ } else if len(c.CertFile) > 0 && len(c.KeyFile) > 0 {
+ cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+ tlsConfig.BuildNameToCertificate()
+
+ return tlsConfig, nil
+}
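
For orientation only, the sketch below shows how the tls.Config produced by GenerateConfig might be plugged into an HTTP client; it is not part of this diff, and the file paths are placeholders.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/common/config"
)

func main() {
	// Hypothetical certificate locations; adjust to your environment.
	cfg := &config.TLSConfig{
		CAFile:   "/etc/prometheus/ca.crt",
		CertFile: "/etc/prometheus/client.crt",
		KeyFile:  "/etc/prometheus/client.key",
	}

	tlsCfg, err := cfg.GenerateConfig()
	if err != nil {
		log.Fatalf("building TLS config: %v", err)
	}

	// Use the resulting tls.Config when scraping targets over HTTPS.
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
	}
	_ = client
}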
diff --git a/vendor/github.com/prometheus/common/config/tls_config_test.go b/vendor/github.com/prometheus/common/config/tls_config_test.go
new file mode 100644
index 000000000..444303532
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/tls_config_test.go
@@ -0,0 +1,92 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "crypto/tls"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "testing"
+
+ "gopkg.in/yaml.v2"
+)
+
+// LoadTLSConfig parses the given YAML file into a tls.Config.
+func LoadTLSConfig(filename string) (*tls.Config, error) {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ cfg := &TLSConfig{}
+ if err = yaml.Unmarshal(content, cfg); err != nil {
+ return nil, err
+ }
+ return cfg.GenerateConfig()
+}
+
+var expectedTLSConfigs = []struct {
+ filename string
+ config *tls.Config
+}{
+ {
+ filename: "tls_config.empty.good.yml",
+ config: &tls.Config{},
+ }, {
+ filename: "tls_config.insecure.good.yml",
+ config: &tls.Config{InsecureSkipVerify: true},
+ },
+}
+
+func TestValidTLSConfig(t *testing.T) {
+ for _, cfg := range expectedTLSConfigs {
+ cfg.config.BuildNameToCertificate()
+ got, err := LoadTLSConfig("testdata/" + cfg.filename)
+ if err != nil {
+ t.Errorf("Error parsing %s: %s", cfg.filename, err)
+ }
+ if !reflect.DeepEqual(*got, *cfg.config) {
+ t.Fatalf("%s: unexpected config result: \n\n%s\n expected\n\n%s", cfg.filename, got, cfg.config)
+ }
+ }
+}
+
+var expectedTLSConfigErrors = []struct {
+ filename string
+ errMsg string
+}{
+ {
+ filename: "tls_config.invalid_field.bad.yml",
+ errMsg: "unknown fields in",
+ }, {
+ filename: "tls_config.cert_no_key.bad.yml",
+ errMsg: "specified without client key file",
+ }, {
+ filename: "tls_config.key_no_cert.bad.yml",
+ errMsg: "specified without client cert file",
+ },
+}
+
+func TestBadTLSConfigs(t *testing.T) {
+ for _, ee := range expectedTLSConfigErrors {
+ _, err := LoadTLSConfig("testdata/" + ee.filename)
+ if err == nil {
+ t.Errorf("Expected error parsing %s but got none", ee.filename)
+ continue
+ }
+ if !strings.Contains(err.Error(), ee.errMsg) {
+ t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/bench_test.go b/vendor/github.com/prometheus/common/expfmt/bench_test.go
new file mode 100644
index 000000000..e539bfc13
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/bench_test.go
@@ -0,0 +1,167 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var parser TextParser
+
+// Benchmarks to show how much penalty text format parsing actually inflicts.
+//
+// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
+//
+// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
+// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
+// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
+// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
+// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
+//
+// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
+// Without compression, it takes ~7x longer to parse, but with compression (the more relevant
+// scenario) the difference shrinks to roughly 4x.
+//
+// The test data contains 248 samples.
+
+// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
+// family DTOs.
+func BenchmarkParseText(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/text")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
+// into metric family DTOs.
+func BenchmarkParseTextGzip(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/text.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ in, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ if _, err := parser.TextToMetricFamilies(in); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
+// metric family DTOs. Note that this does not build a map of metric families
+// (as the text version does), because it is not required for Prometheus
+// ingestion either. (However, it is required for the text-format parsing, as
+// the metric family might be sprinkled all over the text, while the
+// protobuf-format guarantees bundling at one place.)
+func BenchmarkParseProto(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ family := &dto.MetricFamily{}
+ in := bytes.NewReader(data)
+ for {
+ family.Reset()
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
+// protobuf format.
+func BenchmarkParseProtoGzip(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ family := &dto.MetricFamily{}
+ in, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ for {
+ family.Reset()
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
+// metric family DTOs into a map. This is not happening during Prometheus
+// ingestion. It is just here to measure the overhead of that map creation and
+// separate it from the overhead of the text format parsing.
+func BenchmarkParseProtoMap(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ families := map[string]*dto.MetricFamily{}
+ in := bytes.NewReader(data)
+ for {
+ family := &dto.MetricFamily{}
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ families[family.GetName()] = family
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 000000000..487fdc6cc
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,412 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
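
As a hedged end-to-end sketch (the scrape URL and error handling are assumptions, not something this diff prescribes), ResponseFormat, NewDecoder and SampleDecoder from the file above combine to turn an HTTP scrape into model samples:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical scrape target.
	resp, err := http.Get("http://localhost:9100/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Pick a decoder matching the Content-Type, then extract samples,
	// stamping values without an explicit timestamp with "now".
	format := expfmt.ResponseFormat(resp.Header)
	dec := &expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(resp.Body, format),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var samples model.Vector
		if err := dec.Decode(&samples); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		for _, s := range samples {
			fmt.Println(s.Metric, s.Value, s.Timestamp)
		}
	}
}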
diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go
new file mode 100644
index 000000000..c27325a9d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go
@@ -0,0 +1,367 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "io"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/model"
+)
+
+func TestTextDecoder(t *testing.T) {
+ var (
+ ts = model.Now()
+ in = `
+# Only a quite simple scenario with two metric families.
+# More complicated tests of the parser itself can be found in the text package.
+# TYPE mf2 counter
+mf2 3
+mf1{label="value1"} -3.14 123456
+mf1{label="value2"} 42
+mf2 4
+`
+ out = model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf1",
+ "label": "value1",
+ },
+ Value: -3.14,
+ Timestamp: 123456,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf1",
+ "label": "value2",
+ },
+ Value: 42,
+ Timestamp: ts,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf2",
+ },
+ Value: 3,
+ Timestamp: ts,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf2",
+ },
+ Value: 4,
+ Timestamp: ts,
+ },
+ }
+ )
+
+ dec := &SampleDecoder{
+ Dec: &textDecoder{r: strings.NewReader(in)},
+ Opts: &DecodeOptions{
+ Timestamp: ts,
+ },
+ }
+ var all model.Vector
+ for {
+ var smpls model.Vector
+ err := dec.Decode(&smpls)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ all = append(all, smpls...)
+ }
+ sort.Sort(all)
+ sort.Sort(out)
+ if !reflect.DeepEqual(all, out) {
+ t.Fatalf("output does not match")
+ }
+}
+
+func TestProtoDecoder(t *testing.T) {
+
+ var testTime = model.Now()
+
+ scenarios := []struct {
+ in string
+ expected model.Vector
+ fail bool
+ }{
+ {
+ in: "",
+ },
+ {
+ in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
+ fail: true,
+ },
+ {
+ in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ },
+ Value: -42,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "another_label_name": "another_label_value",
+ },
+ Value: 84,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_count",
+ "some_label_name": "some_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_sum",
+ "some_label_name": "some_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ "quantile": "0.99",
+ },
+ Value: -42,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ "quantile": "0.999",
+ },
+ Value: -84,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_count",
+ "another_label_name": "another_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_sum",
+ "another_label_name": "another_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "another_label_name": "another_label_value",
+ "quantile": "0.5",
+ },
+ Value: 10,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "100",
+ },
+ Value: 123,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "120",
+ },
+ Value: 412,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "144",
+ },
+ Value: 592,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "172.8",
+ },
+ Value: 1524,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "+Inf",
+ },
+ Value: 2693,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_sum",
+ },
+ Value: 1756047.3,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_count",
+ },
+ Value: 2693,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ // The metric type is unset in this protobuf, which needs to be handled
+ // correctly by the decoder.
+ in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ },
+ Value: 1,
+ Timestamp: testTime,
+ },
+ },
+ },
+ }
+
+ for i, scenario := range scenarios {
+ dec := &SampleDecoder{
+ Dec: &protoDecoder{r: strings.NewReader(scenario.in)},
+ Opts: &DecodeOptions{
+ Timestamp: testTime,
+ },
+ }
+
+ var all model.Vector
+ for {
+ var smpls model.Vector
+ err := dec.Decode(&smpls)
+ if err == io.EOF {
+ break
+ }
+ if scenario.fail {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ all = append(all, smpls...)
+ }
+ sort.Sort(all)
+ sort.Sort(scenario.expected)
+ if !reflect.DeepEqual(all, scenario.expected) {
+ t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all)
+ }
+ }
+}
+
+func testDiscriminatorHTTPHeader(t testing.TB) {
+ var scenarios = []struct {
+ input map[string]string
+ output Format
+ err error
+ }{
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`},
+ output: FmtProtoDelim,
+ },
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`},
+ output: FmtUnknown,
+ },
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`},
+ output: FmtUnknown,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain; version=0.0.4`},
+ output: FmtText,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain`},
+ output: FmtText,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain; version=0.0.3`},
+ output: FmtUnknown,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ var header http.Header
+
+ if len(scenario.input) > 0 {
+ header = http.Header{}
+ }
+
+ for key, value := range scenario.input {
+ header.Add(key, value)
+ }
+
+ actual := ResponseFormat(header)
+
+ if scenario.output != actual {
+ t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
+ }
+ }
+}
+
+func TestDiscriminatorHTTPHeader(t *testing.T) {
+ testDiscriminatorHTTPHeader(t)
+}
+
+func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testDiscriminatorHTTPHeader(b)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 000000000..11839ed65
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
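+
+// A minimal usage sketch, assuming a hypothetical gatherMetricFamilies helper
+// that yields []*dto.MetricFamily: Negotiate picks a Format from the request's
+// Accept header and NewEncoder then writes each family in that format.
+//
+//    func metricsHandler(w http.ResponseWriter, r *http.Request) {
+//        format := Negotiate(r.Header) // choose a Format from the Accept header
+//        w.Header().Set("Content-Type", string(format))
+//        enc := NewEncoder(w, format)
+//        for _, mf := range gatherMetricFamilies() { // hypothetical metric source
+//            if err := enc.Encode(mf); err != nil {
+//                http.Error(w, err.Error(), http.StatusInternalServerError)
+//                return
+//            }
+//        }
+//    }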
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 000000000..fae10f6eb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
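+
+// As a concrete illustration, the Format constants above expand to the
+// following Content-Type values:
+//
+//    FmtText       == "text/plain; version=0.0.4"
+//    FmtProtoDelim == "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited"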
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 000000000..dc2eedeef
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
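+//
+// Returning 1 tells go-fuzz that the input was parsed successfully and is
+// interesting; returning 0 marks inputs the parser rejected.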
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
new file mode 100644
index 000000000..139597f9c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
@@ -0,0 +1,2 @@
+
+
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
new file mode 100644
index 000000000..2ae870679
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
@@ -0,0 +1,6 @@
+
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
new file mode 100644
index 000000000..5c351db36
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
@@ -0,0 +1,12 @@
+
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
new file mode 100644
index 000000000..0b3c345aa
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
@@ -0,0 +1,22 @@
+
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
new file mode 100644
index 000000000..bde0a387a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
@@ -0,0 +1,10 @@
+
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
new file mode 100644
index 000000000..4c67f9a19
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
@@ -0,0 +1 @@
+bla 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
new file mode 100644
index 000000000..b853478ee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
@@ -0,0 +1 @@
+metric{label="\t"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
new file mode 100644
index 000000000..b5fe5f5a6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2 3
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
new file mode 100644
index 000000000..57c7fbc0b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
@@ -0,0 +1 @@
+metric{label="bla"} blubb
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
new file mode 100644
index 000000000..0a9df79a1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
@@ -0,0 +1,3 @@
+
+# HELP metric one
+# HELP metric two
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
new file mode 100644
index 000000000..5bc742781
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
@@ -0,0 +1,3 @@
+
+# TYPE metric counter
+# TYPE metric untyped
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
new file mode 100644
index 000000000..a9a24265b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
@@ -0,0 +1,3 @@
+
+metric 4.12
+# TYPE metric counter
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
new file mode 100644
index 000000000..7e95ca8f4
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
@@ -0,0 +1,2 @@
+
+# TYPE metric bla
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
new file mode 100644
index 000000000..7825f8887
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
@@ -0,0 +1,2 @@
+
+# TYPE met-ric
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
new file mode 100644
index 000000000..8f35cae0c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
@@ -0,0 +1 @@
+@invalidmetric{label="bla"} 3.14 2 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
new file mode 100644
index 000000000..7ca2cc268
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
@@ -0,0 +1 @@
+{label="bla"} 3.14 2 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
new file mode 100644
index 000000000..7a6ccc0dd
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
@@ -0,0 +1,3 @@
+
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
new file mode 100644
index 000000000..726d0017c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
@@ -0,0 +1,3 @@
+
+metric{label="new
+line"} 3.14
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
new file mode 100644
index 000000000..6aa9e3081
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
@@ -0,0 +1 @@
+metric{@="bla"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
new file mode 100644
index 000000000..d112cb902
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
@@ -0,0 +1 @@
+metric{__name__="bla"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
new file mode 100644
index 000000000..b34554a8d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
@@ -0,0 +1 @@
+metric{label+="bla"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
new file mode 100644
index 000000000..c4d7df3d1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
@@ -0,0 +1 @@
+metric{label=bla} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
new file mode 100644
index 000000000..97eafc4a6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
@@ -0,0 +1,3 @@
+
+# TYPE metric summary
+metric{quantile="bla"} 3.14
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
new file mode 100644
index 000000000..fc706496b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
@@ -0,0 +1 @@
+metric{label="bla"+} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
new file mode 100644
index 000000000..57b4879c0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2.72
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
new file mode 100644
index 000000000..be1e6a369
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
@@ -0,0 +1 @@
+m{} 0
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/json2 b/vendor/github.com/prometheus/common/expfmt/testdata/json2
new file mode 100644
index 000000000..b914c9386
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/json2
@@ -0,0 +1,46 @@
+[
+ {
+ "baseLabels": {
+ "__name__": "rpc_calls_total",
+ "job": "batch_job"
+ },
+ "docstring": "RPC calls.",
+ "metric": {
+ "type": "counter",
+ "value": [
+ {
+ "labels": {
+ "service": "zed"
+ },
+ "value": 25
+ },
+ {
+ "labels": {
+ "service": "bar"
+ },
+ "value": 24
+ }
+ ]
+ }
+ },
+ {
+ "baseLabels": {
+ "__name__": "rpc_latency_microseconds"
+ },
+ "docstring": "RPC latency.",
+ "metric": {
+ "type": "histogram",
+ "value": [
+ {
+ "labels": {
+ "service": "foo"
+ },
+ "value": {
+ "0.010000": 15,
+ "0.990000": 17
+ }
+ }
+ ]
+ }
+ }
+]
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad b/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad
new file mode 100644
index 000000000..cc6ac97c5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad
@@ -0,0 +1,46 @@
+[
+ {
+ "baseLabels": {
+ "__name__": "rpc_calls_total",
+ "job": "batch_job"
+ },
+ "docstring": "RPC calls.",
+ "metric": {
+ "type": "counter",
+ "value": [
+ {
+ "labels": {
+ "servic|e": "zed"
+ },
+ "value": 25
+ },
+ {
+ "labels": {
+ "service": "bar"
+ },
+ "value": 24
+ }
+ ]
+ }
+ },
+ {
+ "baseLabels": {
+ "__name__": "rpc_latency_microseconds"
+ },
+ "docstring": "RPC latency.",
+ "metric": {
+ "type": "histogram",
+ "value": [
+ {
+ "labels": {
+ "service": "foo"
+ },
+ "value": {
+ "0.010000": 15,
+ "0.990000": 17
+ }
+ }
+ ]
+ }
+ }
+]
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/protobuf b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf
new file mode 100644
index 000000000..b2d018a7c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf
Binary files differ
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz
new file mode 100644
index 000000000..7622adb1c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz
Binary files differ
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/text b/vendor/github.com/prometheus/common/expfmt/testdata/text
new file mode 100644
index 000000000..f3d8c3784
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/text
@@ -0,0 +1,322 @@
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/"} 0
+http_request_duration_microseconds_count{handler="/"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/alerts"} 0
+http_request_duration_microseconds_count{handler="/alerts"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/metrics"} 0
+http_request_duration_microseconds_count{handler="/api/metrics"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/query"} 0
+http_request_duration_microseconds_count{handler="/api/query"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/query_range"} 0
+http_request_duration_microseconds_count{handler="/api/query_range"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/targets"} 0
+http_request_duration_microseconds_count{handler="/api/targets"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/consoles/"} 0
+http_request_duration_microseconds_count{handler="/consoles/"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/graph"} 0
+http_request_duration_microseconds_count{handler="/graph"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/heap"} 0
+http_request_duration_microseconds_count{handler="/heap"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/static/"} 0
+http_request_duration_microseconds_count{handler="/static/"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
+http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
+http_request_duration_microseconds_count{handler="prometheus"} 119
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="/",quantile="0.5"} 0
+http_request_size_bytes{handler="/",quantile="0.9"} 0
+http_request_size_bytes{handler="/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/"} 0
+http_request_size_bytes_count{handler="/"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/alerts"} 0
+http_request_size_bytes_count{handler="/alerts"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/metrics"} 0
+http_request_size_bytes_count{handler="/api/metrics"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/query"} 0
+http_request_size_bytes_count{handler="/api/query"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/query_range"} 0
+http_request_size_bytes_count{handler="/api/query_range"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/targets"} 0
+http_request_size_bytes_count{handler="/api/targets"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/consoles/"} 0
+http_request_size_bytes_count{handler="/consoles/"} 0
+http_request_size_bytes{handler="/graph",quantile="0.5"} 0
+http_request_size_bytes{handler="/graph",quantile="0.9"} 0
+http_request_size_bytes{handler="/graph",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/graph"} 0
+http_request_size_bytes_count{handler="/graph"} 0
+http_request_size_bytes{handler="/heap",quantile="0.5"} 0
+http_request_size_bytes{handler="/heap",quantile="0.9"} 0
+http_request_size_bytes{handler="/heap",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/heap"} 0
+http_request_size_bytes_count{handler="/heap"} 0
+http_request_size_bytes{handler="/static/",quantile="0.5"} 0
+http_request_size_bytes{handler="/static/",quantile="0.9"} 0
+http_request_size_bytes{handler="/static/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/static/"} 0
+http_request_size_bytes_count{handler="/static/"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
+http_request_size_bytes_sum{handler="prometheus"} 34488
+http_request_size_bytes_count{handler="prometheus"} 119
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="prometheus",method="get"} 119
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="/",quantile="0.5"} 0
+http_response_size_bytes{handler="/",quantile="0.9"} 0
+http_response_size_bytes{handler="/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/"} 0
+http_response_size_bytes_count{handler="/"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/alerts"} 0
+http_response_size_bytes_count{handler="/alerts"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/metrics"} 0
+http_response_size_bytes_count{handler="/api/metrics"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/query"} 0
+http_response_size_bytes_count{handler="/api/query"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/query_range"} 0
+http_response_size_bytes_count{handler="/api/query_range"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/targets"} 0
+http_response_size_bytes_count{handler="/api/targets"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/consoles/"} 0
+http_response_size_bytes_count{handler="/consoles/"} 0
+http_response_size_bytes{handler="/graph",quantile="0.5"} 0
+http_response_size_bytes{handler="/graph",quantile="0.9"} 0
+http_response_size_bytes{handler="/graph",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/graph"} 0
+http_response_size_bytes_count{handler="/graph"} 0
+http_response_size_bytes{handler="/heap",quantile="0.5"} 0
+http_response_size_bytes{handler="/heap",quantile="0.9"} 0
+http_response_size_bytes{handler="/heap",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/heap"} 0
+http_response_size_bytes_count{handler="/heap"} 0
+http_response_size_bytes{handler="/static/",quantile="0.5"} 0
+http_response_size_bytes{handler="/static/",quantile="0.9"} 0
+http_response_size_bytes{handler="/static/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/static/"} 0
+http_response_size_bytes_count{handler="/static/"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
+http_response_size_bytes_sum{handler="prometheus"} 247001
+http_response_size_bytes_count{handler="prometheus"} 119
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 0.55
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 70
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 8192
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 29
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 5.3870592e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.42236894836e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 5.41478912e+08
+# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
+# TYPE prometheus_dns_sd_lookup_failures_total counter
+prometheus_dns_sd_lookup_failures_total 0
+# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
+# TYPE prometheus_dns_sd_lookups_total counter
+prometheus_dns_sd_lookups_total 7
+# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
+# TYPE prometheus_evaluator_duration_milliseconds summary
+prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
+prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
+prometheus_evaluator_duration_milliseconds_sum 12
+prometheus_evaluator_duration_milliseconds_count 23
+# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
+# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
+prometheus_local_storage_checkpoint_duration_milliseconds 0
+# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
+# TYPE prometheus_local_storage_chunk_ops_total counter
+prometheus_local_storage_chunk_ops_total{type="create"} 598
+prometheus_local_storage_chunk_ops_total{type="persist"} 174
+prometheus_local_storage_chunk_ops_total{type="pin"} 920
+prometheus_local_storage_chunk_ops_total{type="transcode"} 415
+prometheus_local_storage_chunk_ops_total{type="unpin"} 920
+# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
+# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
+prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
+# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
+# TYPE prometheus_local_storage_indexing_batch_sizes summary
+prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
+prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
+prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
+prometheus_local_storage_indexing_batch_sizes_sum 2
+prometheus_local_storage_indexing_batch_sizes_count 1
+# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
+# TYPE prometheus_local_storage_indexing_queue_capacity gauge
+prometheus_local_storage_indexing_queue_capacity 16384
+# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
+# TYPE prometheus_local_storage_indexing_queue_length gauge
+prometheus_local_storage_indexing_queue_length 0
+# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
+# TYPE prometheus_local_storage_ingested_samples_total counter
+prometheus_local_storage_ingested_samples_total 30473
+# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
+# TYPE prometheus_local_storage_invalid_preload_requests_total counter
+prometheus_local_storage_invalid_preload_requests_total 0
+# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
+# TYPE prometheus_local_storage_memory_chunkdescs gauge
+prometheus_local_storage_memory_chunkdescs 1059
+# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
+# TYPE prometheus_local_storage_memory_chunks gauge
+prometheus_local_storage_memory_chunks 1020
+# HELP prometheus_local_storage_memory_series The current number of series in memory.
+# TYPE prometheus_local_storage_memory_series gauge
+prometheus_local_storage_memory_series 424
+# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
+# TYPE prometheus_local_storage_persist_latency_microseconds summary
+prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
+prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
+prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
+prometheus_local_storage_persist_latency_microseconds_sum 20424.415
+prometheus_local_storage_persist_latency_microseconds_count 174
+# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
+# TYPE prometheus_local_storage_persist_queue_capacity gauge
+prometheus_local_storage_persist_queue_capacity 1024
+# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
+# TYPE prometheus_local_storage_persist_queue_length gauge
+prometheus_local_storage_persist_queue_length 0
+# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
+# TYPE prometheus_local_storage_series_ops_total counter
+prometheus_local_storage_series_ops_total{type="create"} 2
+prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
+# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
+# TYPE prometheus_notifications_latency_milliseconds summary
+prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
+prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
+prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
+prometheus_notifications_latency_milliseconds_sum 0
+prometheus_notifications_latency_milliseconds_count 0
+# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
+# TYPE prometheus_notifications_queue_capacity gauge
+prometheus_notifications_queue_capacity 100
+# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
+# TYPE prometheus_notifications_queue_length gauge
+prometheus_notifications_queue_length 0
+# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
+# TYPE prometheus_rule_evaluation_duration_milliseconds summary
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
+prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
+prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
+prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
+prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
+# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
+# TYPE prometheus_rule_evaluation_failures_total counter
+prometheus_rule_evaluation_failures_total 0
+# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
+# TYPE prometheus_samples_queue_capacity gauge
+prometheus_samples_queue_capacity 4096
+# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
+# TYPE prometheus_samples_queue_length gauge
+prometheus_samples_queue_length 0
+# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
+# TYPE prometheus_target_interval_length_seconds summary
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
+prometheus_target_interval_length_seconds_sum{interval="15s"} 175
+prometheus_target_interval_length_seconds_count{interval="15s"} 12
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
+prometheus_target_interval_length_seconds_sum{interval="1s"} 55
+prometheus_target_interval_length_seconds_count{interval="1s"} 117
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/text.gz b/vendor/github.com/prometheus/common/expfmt/testdata/text.gz
new file mode 100644
index 000000000..b7658c84d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/text.gz
Binary files differ
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..f11321cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output has the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into the text exposition format and
+// writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+var (
+ escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ if includeDoubleQuote {
+ return escapeWithDoubleQuote.Replace(v)
+ }
+
+ return escape.Replace(v)
+}
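+
+// A minimal usage sketch for MetricFamilyToText, assuming a hand-built gauge
+// family (the metric name and value here are illustrative only):
+//
+//    mf := &dto.MetricFamily{
+//        Name: proto.String("queue_length"),
+//        Type: dto.MetricType_GAUGE.Enum(),
+//        Metric: []*dto.Metric{
+//            {Gauge: &dto.Gauge{Value: proto.Float64(42)}},
+//        },
+//    }
+//    var buf bytes.Buffer
+//    if _, err := MetricFamilyToText(&buf, mf); err != nil {
+//        // handle error
+//    }
+//    // buf.String() now holds:
+//    //   # TYPE queue_length gauge
+//    //   queue_length 42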
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create_test.go b/vendor/github.com/prometheus/common/expfmt/text_create_test.go
new file mode 100644
index 000000000..e4cc5d803
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create_test.go
@@ -0,0 +1,443 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func testCreate(t testing.TB) {
+ var scenarios = []struct {
+ in *dto.MetricFamily
+ out string
+ }{
+ // 0: Counter, NaN as value, timestamp given.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("two-line\n doc str\\ing"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(.23),
+ },
+ TimestampMs: proto.Int64(1234567890),
+ },
+ },
+ },
+ out: `# HELP name two-line\n doc str\\ing
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name{labelname="val2",basename="basevalue"} 0.23 1234567890
+`,
+ },
+ // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("gauge_name"),
+ Help: proto.String("gauge\ndoc\nstr\"ing"),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("val with\nnew line"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("val with \\backslash and \"quotes\""),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(+1)),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("Björn"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("佖佥"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(3.14E42),
+ },
+ },
+ },
+ },
+ out: `# HELP gauge_name gauge\ndoc\nstr"ing
+# TYPE gauge_name gauge
+gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
+gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
+`,
+ },
+ // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("untyped_name"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("value 1"),
+ },
+ },
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-1.23e-45),
+ },
+ },
+ },
+ },
+ out: `# TYPE untyped_name untyped
+untyped_name -Inf
+untyped_name{name_1="value 1"} -1.23e-45
+`,
+ },
+ // 3: Summary.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("summary_name"),
+ Help: proto.String("summary docstring"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(42),
+ SampleSum: proto.Float64(-3.4567),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(-1.23),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(.2342354),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.99),
+ Value: proto.Float64(0),
+ },
+ },
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("value 1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("value 2"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(4711),
+ SampleSum: proto.Float64(2010.1971),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(1),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(2),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.99),
+ Value: proto.Float64(3),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP summary_name summary docstring
+# TYPE summary_name summary
+summary_name{quantile="0.5"} -1.23
+summary_name{quantile="0.9"} 0.2342354
+summary_name{quantile="0.99"} 0
+summary_name_sum -3.4567
+summary_name_count 42
+summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
+summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
+summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
+summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
+summary_name_count{name_1="value 1",name_2="value 2"} 4711
+`,
+ },
+ // 4: Histogram
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(math.Inf(+1)),
+ CumulativeCount: proto.Uint64(2693),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ },
+ // 5: Histogram with missing +Inf bucket.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ },
+ // 6: No metric type, should result in default type Counter.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ out: `# HELP name doc string
+# TYPE name counter
+name -Inf
+`,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
+ n, err := MetricFamilyToText(out, scenario.in)
+ if err != nil {
+ t.Errorf("%d. error: %s", i, err)
+ continue
+ }
+ if expected, got := len(scenario.out), n; expected != got {
+ t.Errorf(
+ "%d. expected %d bytes written, got %d",
+ i, expected, got,
+ )
+ }
+ if expected, got := scenario.out, out.String(); expected != got {
+ t.Errorf(
+ "%d. expected out=%q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+func TestCreate(t *testing.T) {
+ testCreate(t)
+}
+
+func BenchmarkCreate(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testCreate(b)
+ }
+}
+
+func testCreateError(t testing.TB) {
+ var scenarios = []struct {
+ in *dto.MetricFamily
+ err string
+ }{
+ // 0: No metric.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{},
+ },
+ err: "MetricFamily has no metrics",
+ },
+ // 1: No metric name.
+ {
+ in: &dto.MetricFamily{
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ err: "MetricFamily has no name",
+ },
+ // 2: Wrong type.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ err: "expected counter in metric",
+ },
+ }
+
+ for i, scenario := range scenarios {
+ var out bytes.Buffer
+ _, err := MetricFamilyToText(&out, scenario.in)
+ if err == nil {
+ t.Errorf("%d. expected error, got nil", i)
+ continue
+ }
+ if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
+ t.Errorf(
+ "%d. expected error starting with %q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+func TestCreateError(t *testing.T) {
+ testCreateError(t)
+}
+
+func BenchmarkCreateError(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testCreateError(b)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 000000000..ef9a15077
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
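+
+// Illustrative only (not part of the original file): a ParseError with
+// Line 3 and Msg "expected float as value" renders as
+//
+//	text format parsing error in line 3: expected float as value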
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+	// These tell us if the currently processed line ends in '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing
+	// the sample count or sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily nor the label
+// pairs within each Metric are sorted. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// MetricFamilyToText function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
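+
+// Illustrative usage sketch (not part of the original file); it assumes the
+// metrics text is already available in a string named input:
+//
+//	var parser TextParser
+//	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
+//	if err != nil {
+//		// err may be a ParseError carrying the offending line number
+//	}
+//	for name, mf := range families {
+//		fmt.Printf("%s: %d metrics\n", name, len(mf.GetMetric()))
+//	}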
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to
+	// currentMF.Metric right now. First wait to see whether this is a
+	// summary or histogram whose metric already exists, which we can only
+	// know after having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
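+
+// For illustration (not part of the original file): with
+// recognizeEscapeSequence set, a HELP line such as
+//
+//	# HELP name two-line\n doc str\\ing
+//
+// yields a docstring in which '\n' has become a real line feed and '\\' a
+// single backslash (see the corresponding scenario in text_parse_test.go).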
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+	// Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
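+
+// For illustration (not part of the original file): these helpers map the
+// synthetic sample names back to their metric family, e.g.
+//
+//	summaryMetricName("rpc_latency_count")     == "rpc_latency"
+//	histogramMetricName("req_duration_bucket") == "req_duration"
+//
+// so that _count, _sum and _bucket lines are attached to the right family.
+// The metric names here are placeholders.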
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
new file mode 100644
index 000000000..7e7388ce9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
@@ -0,0 +1,588 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func testTextParse(t testing.TB) {
+ var scenarios = []struct {
+ in string
+ out []*dto.MetricFamily
+ }{
+ // 0: Empty lines as input.
+ {
+ in: `
+
+`,
+ out: []*dto.MetricFamily{},
+ },
+ // 1: Minimal case.
+ {
+ in: `
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("minimal_metric"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(1.234),
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("another_metric"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-3e3),
+ },
+ TimestampMs: proto.Int64(103948),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("no_labels"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(3),
+ },
+ },
+ },
+ },
+ },
+ },
+ // 2: Counters & gauges, docstrings, various whitespace, escape sequences.
+ {
+ in: `
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("two-line\n doc str\\ing"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("base\"v\\al\nue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(.23),
+ },
+ TimestampMs: proto.Int64(1234567890),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("name2"),
+ Help: proto.String("doc str\"ing 2"),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue2"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(+1)),
+ },
+ TimestampMs: proto.Int64(54321),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ },
+ },
+ // 3: The evil summary, mixed with other types and funny comments.
+ {
+ in: `
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("fake_sum"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(2001),
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("decoy"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-1),
+ },
+ TimestampMs: proto.Int64(-2),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("my_summary"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(42),
+ SampleSum: proto.Float64(4711),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(110),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(140),
+ },
+ },
+ },
+ TimestampMs: proto.Int64(2),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n2"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(5),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(-12.34),
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ },
+ TimestampMs: proto.Int64(5),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val2"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleSum: proto.Float64(8),
+ },
+ TimestampMs: proto.Int64(15),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val3"),
+ },
+ },
+ Summary: &dto.Summary{
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.2),
+ Value: proto.Float64(4711),
+ },
+ },
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("another_summary"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n2"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(20),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.3),
+ Value: proto.Float64(-1.2),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ // 4: The histogram.
+ {
+ in: `
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ out: []*dto.MetricFamily{
+ {
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(math.Inf(+1)),
+ CumulativeCount: proto.Uint64(2693),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, scenario := range scenarios {
+ out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
+ if err != nil {
+ t.Errorf("%d. error: %s", i, err)
+ continue
+ }
+ if expected, got := len(scenario.out), len(out); expected != got {
+ t.Errorf(
+ "%d. expected %d MetricFamilies, got %d",
+ i, expected, got,
+ )
+ }
+ for _, expected := range scenario.out {
+ got, ok := out[expected.GetName()]
+ if !ok {
+ t.Errorf(
+ "%d. expected MetricFamily %q, found none",
+ i, expected.GetName(),
+ )
+ continue
+ }
+ if expected.String() != got.String() {
+ t.Errorf(
+ "%d. expected MetricFamily %s, got %s",
+ i, expected, got,
+ )
+ }
+ }
+ }
+}
+
+func TestTextParse(t *testing.T) {
+ testTextParse(t)
+}
+
+func BenchmarkTextParse(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testTextParse(b)
+ }
+}
+
+func testTextParseError(t testing.TB) {
+ var scenarios = []struct {
+ in string
+ err string
+ }{
+ // 0: No new-line at end of input.
+ {
+ in: `
+bla 3.14
+blubber 42`,
+ err: "text format parsing error in line 3: unexpected end of input stream",
+ },
+ // 1: Invalid escape sequence in label value.
+ {
+ in: `metric{label="\t"} 3.14`,
+ err: "text format parsing error in line 1: invalid escape sequence",
+ },
+ // 2: Newline in label value.
+ {
+ in: `
+metric{label="new
+line"} 3.14
+`,
+ err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
+ },
+ // 3:
+ {
+ in: `metric{@="bla"} 3.14`,
+ err: "text format parsing error in line 1: invalid label name for metric",
+ },
+ // 4:
+ {
+ in: `metric{__name__="bla"} 3.14`,
+ err: `text format parsing error in line 1: label name "__name__" is reserved`,
+ },
+ // 5:
+ {
+ in: `metric{label+="bla"} 3.14`,
+ err: "text format parsing error in line 1: expected '=' after label name",
+ },
+ // 6:
+ {
+ in: `metric{label=bla} 3.14`,
+ err: "text format parsing error in line 1: expected '\"' at start of label value",
+ },
+ // 7:
+ {
+ in: `
+# TYPE metric summary
+metric{quantile="bla"} 3.14
+`,
+ err: "text format parsing error in line 3: expected float as value for 'quantile' label",
+ },
+ // 8:
+ {
+ in: `metric{label="bla"+} 3.14`,
+ err: "text format parsing error in line 1: unexpected end of label value",
+ },
+ // 9:
+ {
+ in: `metric{label="bla"} 3.14 2.72
+`,
+ err: "text format parsing error in line 1: expected integer as timestamp",
+ },
+ // 10:
+ {
+ in: `metric{label="bla"} 3.14 2 3
+`,
+ err: "text format parsing error in line 1: spurious string after timestamp",
+ },
+ // 11:
+ {
+ in: `metric{label="bla"} blubb
+`,
+ err: "text format parsing error in line 1: expected float as value",
+ },
+ // 12:
+ {
+ in: `
+# HELP metric one
+# HELP metric two
+`,
+ err: "text format parsing error in line 3: second HELP line for metric name",
+ },
+ // 13:
+ {
+ in: `
+# TYPE metric counter
+# TYPE metric untyped
+`,
+ err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
+ },
+ // 14:
+ {
+ in: `
+metric 4.12
+# TYPE metric counter
+`,
+ err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
+ },
+		// 15:
+ {
+ in: `
+# TYPE metric bla
+`,
+ err: "text format parsing error in line 2: unknown metric type",
+ },
+		// 16:
+ {
+ in: `
+# TYPE met-ric
+`,
+ err: "text format parsing error in line 2: invalid metric name in comment",
+ },
+		// 17:
+ {
+ in: `@invalidmetric{label="bla"} 3.14 2`,
+ err: "text format parsing error in line 1: invalid metric name",
+ },
+		// 18:
+ {
+ in: `{label="bla"} 3.14 2`,
+ err: "text format parsing error in line 1: invalid metric name",
+ },
+		// 19:
+ {
+ in: `
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
+`,
+ err: "text format parsing error in line 3: expected float as value for 'le' label",
+ },
+ }
+
+ for i, scenario := range scenarios {
+ _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
+ if err == nil {
+ t.Errorf("%d. expected error, got nil", i)
+ continue
+ }
+ if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
+ t.Errorf(
+ "%d. expected error starting with %q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+func TestTextParseError(t *testing.T) {
+ testTextParseError(t)
+}
+
+func BenchmarkParseError(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testTextParseError(b)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 000000000..7723656d5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+    Q float64
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 000000000..648b38cb6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
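+
+// Illustrative usage (not part of the original file): picking a response
+// content type for an incoming request, where r is assumed to be an
+// *http.Request.
+//
+//	accept := r.Header.Get("Accept")
+//	contentType := goautoneg.Negotiate(accept, []string{"application/json", "text/plain"})
+//	if contentType == "" {
+//		// none of the offered alternatives was acceptable
+//	}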
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go
new file mode 100644
index 000000000..41d328f1d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go
@@ -0,0 +1,33 @@
+package goautoneg
+
+import (
+ "testing"
+)
+
+var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"
+
+func TestParseAccept(t *testing.T) {
+ alternatives := []string{"text/html", "image/png"}
+ content_type := Negotiate(chrome, alternatives)
+ if content_type != "image/png" {
+ t.Errorf("got %s expected image/png", content_type)
+ }
+
+ alternatives = []string{"text/html", "text/plain", "text/n3"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/html" {
+ t.Errorf("got %s expected text/html", content_type)
+ }
+
+ alternatives = []string{"text/n3", "text/plain"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/plain" {
+ t.Errorf("got %s expected text/plain", content_type)
+ }
+
+ alternatives = []string{"text/n3", "application/rdf+xml"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/n3" {
+ t.Errorf("got %s expected text/n3", content_type)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
new file mode 100644
index 000000000..6d41284ce
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package log
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/windows/svc/eventlog"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ setEventlogFormatter = func(name string, debugAsInfo bool) error {
+ if name == "" {
+ return fmt.Errorf("missing name parameter")
+ }
+
+ fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
+ origLogger.Errorf("can't connect logger to eventlog: %v", err)
+ return err
+ }
+ origLogger.Formatter = fmter
+ return nil
+ }
+}
+
+type eventlogger struct {
+ log *eventlog.Log
+ debugAsInfo bool
+ wrap logrus.Formatter
+}
+
+func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
+ logHandle, err := eventlog.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
+}
+
+func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
+ data, err := s.wrap.Format(e)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
+ return data, err
+ }
+
+ switch e.Level {
+ case logrus.PanicLevel:
+ fallthrough
+ case logrus.FatalLevel:
+ fallthrough
+ case logrus.ErrorLevel:
+ err = s.log.Error(102, e.Message)
+ case logrus.WarnLevel:
+ err = s.log.Warning(101, e.Message)
+ case logrus.InfoLevel:
+ err = s.log.Info(100, e.Message)
+ case logrus.DebugLevel:
+ if s.debugAsInfo {
+ err = s.log.Info(100, e.Message)
+ }
+ default:
+ err = s.log.Info(100, e.Message)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
+ }
+
+ return data, err
+}
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
new file mode 100644
index 000000000..efad4842f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/log.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type levelFlag string
+
+// String implements flag.Value.
+func (f levelFlag) String() string {
+ return fmt.Sprintf("%q", string(f))
+}
+
+// Set implements flag.Value.
+func (f levelFlag) Set(level string) error {
+ l, err := logrus.ParseLevel(level)
+ if err != nil {
+ return err
+ }
+ origLogger.Level = l
+ return nil
+}
+
+// setSyslogFormatter is nil if the target OS does not support syslog.
+var setSyslogFormatter func(string, string) error
+
+// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
+var setEventlogFormatter func(string, bool) error
+
+func setJSONFormatter() {
+ origLogger.Formatter = &logrus.JSONFormatter{}
+}
+
+type logFormatFlag url.URL
+
+// String implements flag.Value.
+func (f logFormatFlag) String() string {
+ u := url.URL(f)
+ return fmt.Sprintf("%q", u.String())
+}
+
+// Set implements flag.Value.
+func (f logFormatFlag) Set(format string) error {
+ u, err := url.Parse(format)
+ if err != nil {
+ return err
+ }
+ if u.Scheme != "logger" {
+ return fmt.Errorf("invalid scheme %s", u.Scheme)
+ }
+ jsonq := u.Query().Get("json")
+ if jsonq == "true" {
+ setJSONFormatter()
+ }
+
+ switch u.Opaque {
+ case "syslog":
+ if setSyslogFormatter == nil {
+ return fmt.Errorf("system does not support syslog")
+ }
+ appname := u.Query().Get("appname")
+ facility := u.Query().Get("local")
+ return setSyslogFormatter(appname, facility)
+ case "eventlog":
+ if setEventlogFormatter == nil {
+ return fmt.Errorf("system does not support eventlog")
+ }
+ name := u.Query().Get("name")
+ debugAsInfo := false
+ debugAsInfoRaw := u.Query().Get("debugAsInfo")
+ if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
+ debugAsInfo = parsedDebugAsInfo
+ }
+ return setEventlogFormatter(name, debugAsInfo)
+ case "stdout":
+ origLogger.Out = os.Stdout
+ case "stderr":
+ origLogger.Out = os.Stderr
+ default:
+ return fmt.Errorf("unsupported logger %q", u.Opaque)
+ }
+ return nil
+}
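+
+// For illustration (not part of the original file), values accepted by Set
+// include:
+//
+//	logger:stderr
+//	logger:stdout?json=true
+//	logger:syslog?appname=bob&local=7
+//	logger:eventlog?name=myapp&debugAsInfo=true
+//
+// where "bob" and "myapp" are placeholder application names.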
+
+func init() {
+ AddFlags(flag.CommandLine)
+}
+
+// AddFlags adds the flags used by this package to the given FlagSet. That's
+// useful if working with a custom FlagSet. The init function of this package
+// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
+// flag.Parse() to make the logging flags take effect.
+func AddFlags(fs *flag.FlagSet) {
+ fs.Var(
+ levelFlag(origLogger.Level.String()),
+ "log.level",
+ "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
+ )
+ fs.Var(
+ logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
+ "log.format",
+ `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
+ )
+}
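+
+// Illustrative only (not part of the original file): with these flags
+// registered, a binary (my_exporter is a placeholder name) can be started as
+//
+//	./my_exporter -log.level=debug -log.format="logger:stdout?json=true"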
+
+// Logger is the interface for loggers used in the Prometheus components.
+type Logger interface {
+ Debug(...interface{})
+ Debugln(...interface{})
+ Debugf(string, ...interface{})
+
+ Info(...interface{})
+ Infoln(...interface{})
+ Infof(string, ...interface{})
+
+ Warn(...interface{})
+ Warnln(...interface{})
+ Warnf(string, ...interface{})
+
+ Error(...interface{})
+ Errorln(...interface{})
+ Errorf(string, ...interface{})
+
+ Fatal(...interface{})
+ Fatalln(...interface{})
+ Fatalf(string, ...interface{})
+
+ With(key string, value interface{}) Logger
+}
+
+type logger struct {
+ entry *logrus.Entry
+}
+
+func (l logger) With(key string, value interface{}) Logger {
+ return logger{l.entry.WithField(key, value)}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debug(args ...interface{}) {
+ l.sourced().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func (l logger) Debugln(args ...interface{}) {
+ l.sourced().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func (l logger) Debugf(format string, args ...interface{}) {
+ l.sourced().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Info(args ...interface{}) {
+ l.sourced().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func (l logger) Infoln(args ...interface{}) {
+ l.sourced().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l logger) Infof(format string, args ...interface{}) {
+ l.sourced().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warn(args ...interface{}) {
+ l.sourced().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func (l logger) Warnln(args ...interface{}) {
+ l.sourced().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l logger) Warnf(format string, args ...interface{}) {
+ l.sourced().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Error(args ...interface{}) {
+ l.sourced().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func (l logger) Errorln(args ...interface{}) {
+ l.sourced().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l logger) Errorf(format string, args ...interface{}) {
+ l.sourced().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatal(args ...interface{}) {
+ l.sourced().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func (l logger) Fatalln(args ...interface{}) {
+ l.sourced().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l logger) Fatalf(format string, args ...interface{}) {
+ l.sourced().Fatalf(format, args...)
+}
+
+// sourced adds a source field to the logger that contains
+// the file name and line where the logging happened.
+func (l logger) sourced() *logrus.Entry {
+ _, file, line, ok := runtime.Caller(2)
+ if !ok {
+ file = "<???>"
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ file = file[slash+1:]
+ }
+ return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
+}
+
+var origLogger = logrus.New()
+var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
+
+// Base returns the default Logger, which logs to standard error unless
+// reconfigured via the log.format flag.
+func Base() Logger {
+ return baseLogger
+}
+
+// NewLogger returns a new Logger logging to out.
+func NewLogger(w io.Writer) Logger {
+ l := logrus.New()
+ l.Out = w
+ return logger{entry: logrus.NewEntry(l)}
+}
+
+// NewNopLogger returns a logger that discards all log messages.
+func NewNopLogger() Logger {
+ l := logrus.New()
+ l.Out = ioutil.Discard
+ return logger{entry: logrus.NewEntry(l)}
+}
+
+// With adds a field to the logger.
+func With(key string, value interface{}) Logger {
+ return baseLogger.With(key, value)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ baseLogger.sourced().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ baseLogger.sourced().Debugln(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ baseLogger.sourced().Debugf(format, args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ baseLogger.sourced().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ baseLogger.sourced().Infoln(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ baseLogger.sourced().Infof(format, args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ baseLogger.sourced().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ baseLogger.sourced().Warnln(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ baseLogger.sourced().Warnf(format, args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ baseLogger.sourced().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ baseLogger.sourced().Errorln(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ baseLogger.sourced().Errorf(format, args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ baseLogger.sourced().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ baseLogger.sourced().Fatalln(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ baseLogger.sourced().Fatalf(format, args...)
+}
+
+type errorLogWriter struct{}
+
+func (errorLogWriter) Write(b []byte) (int, error) {
+ baseLogger.sourced().Error(string(b))
+ return len(b), nil
+}
+
+// NewErrorLogger returns a log.Logger that is meant to be used
+// in the ErrorLog field of an http.Server to log HTTP server errors.
+func NewErrorLogger() *log.Logger {
+ return log.New(&errorLogWriter{}, "", 0)
+}
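
A minimal usage sketch, not part of the vendored diff: it assumes a hypothetical main package that imports this log package, and shows the flags registered by init/AddFlags together with NewErrorLogger feeding HTTP server errors back into the Prometheus logger. The listen address and component label are made up.

package main

import (
	"flag"
	"net/http"

	"github.com/prometheus/common/log"
)

func main() {
	// The log package's init has already registered -log.level and -log.format
	// on flag.CommandLine, so parsing the flags is all that is needed.
	flag.Parse()

	log.With("component", "web").Infoln("starting HTTP server")

	srv := &http.Server{
		Addr:     ":8080",              // hypothetical listen address
		ErrorLog: log.NewErrorLogger(), // HTTP server errors are logged at level Error
	}
	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}
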
diff --git a/vendor/github.com/prometheus/common/log/log_test.go b/vendor/github.com/prometheus/common/log/log_test.go
new file mode 100644
index 000000000..953adb79c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/log_test.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "bytes"
+ "regexp"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func TestFileLineLogging(t *testing.T) {
+ var buf bytes.Buffer
+ origLogger.Out = &buf
+ origLogger.Formatter = &logrus.TextFormatter{
+ DisableColors: true,
+ }
+
+ // The default logging level should be "info".
+ Debug("This debug-level line should not show up in the output.")
+ Infof("This %s-level line should show up in the output.", "info")
+
+ re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33" \n$`
+ if !regexp.MustCompile(re).Match(buf.Bytes()) {
+ t.Fatalf("%q did not match expected regex %q", buf.String(), re)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
new file mode 100644
index 000000000..fd8c6fbee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -0,0 +1,119 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+ "fmt"
+ "log/syslog"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ setSyslogFormatter = func(appname, local string) error {
+ if appname == "" {
+ return fmt.Errorf("missing appname parameter")
+ }
+ if local == "" {
+ return fmt.Errorf("missing local parameter")
+ }
+
+ fmter, err := newSyslogger(appname, local, origLogger.Formatter)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
+ origLogger.Errorf("can't connect logger to syslog: %v", err)
+ return err
+ }
+ origLogger.Formatter = fmter
+ return nil
+ }
+}
+
+var ceeTag = []byte("@cee:")
+
+type syslogger struct {
+ wrap logrus.Formatter
+ out *syslog.Writer
+}
+
+func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) {
+ priority, err := getFacility(facility)
+ if err != nil {
+ return nil, err
+ }
+ out, err := syslog.New(priority, appname)
+ return &syslogger{
+ out: out,
+ wrap: fmter,
+ }, err
+}
+
+func getFacility(facility string) (syslog.Priority, error) {
+ switch facility {
+ case "0":
+ return syslog.LOG_LOCAL0, nil
+ case "1":
+ return syslog.LOG_LOCAL1, nil
+ case "2":
+ return syslog.LOG_LOCAL2, nil
+ case "3":
+ return syslog.LOG_LOCAL3, nil
+ case "4":
+ return syslog.LOG_LOCAL4, nil
+ case "5":
+ return syslog.LOG_LOCAL5, nil
+ case "6":
+ return syslog.LOG_LOCAL6, nil
+ case "7":
+ return syslog.LOG_LOCAL7, nil
+ }
+ return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility)
+}
+
+func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
+ data, err := s.wrap.Format(e)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err)
+ return data, err
+ }
+ // only append tag to data sent to syslog (line), not to what
+ // is returned
+ line := string(append(ceeTag, data...))
+
+ switch e.Level {
+ case logrus.PanicLevel:
+ err = s.out.Crit(line)
+ case logrus.FatalLevel:
+ err = s.out.Crit(line)
+ case logrus.ErrorLevel:
+ err = s.out.Err(line)
+ case logrus.WarnLevel:
+ err = s.out.Warning(line)
+ case logrus.InfoLevel:
+ err = s.out.Info(line)
+ case logrus.DebugLevel:
+ err = s.out.Debug(line)
+ default:
+ err = s.out.Notice(line)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err)
+ }
+
+ return data, err
+}
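
A hedged sketch of how the pieces above fit together. Because newSyslogger and origLogger are unexported, it is written as if it lived inside this package; it mirrors what setLogFormat does for a URL such as logger:syslog?appname=myapp&local=7 (the appname and facility are hypothetical), and it only builds on systems that provide log/syslog.

package log

import "github.com/Sirupsen/logrus"

// useSyslogSketch shows the effect of -log.format="logger:syslog?appname=myapp&local=7":
// "7" is translated to syslog.LOG_LOCAL7 by getFacility, and every formatted
// entry is additionally written to syslog with the "@cee:" prefix.
func useSyslogSketch() error {
	fmter, err := newSyslogger("myapp", "7", &logrus.TextFormatter{DisableColors: true})
	if err != nil {
		return err
	}
	origLogger.Formatter = fmter
	return nil
}
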
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 000000000..35e739c7a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
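
An illustrative sketch, separate from the vendored code: it builds an Alert, validates it, derives its status from the (unset) EndsAt, and sorts a slice of Alerts; all label values are made up.

package main

import (
	"fmt"
	"sort"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighErrorRate", "job": "api"},
		StartsAt: time.Now().Add(-5 * time.Minute),
	}
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	fmt.Println(a.Name(), a.Status()) // "HighErrorRate firing" while EndsAt is unset

	alerts := model.Alerts{a}
	sort.Sort(alerts) // ordered by StartsAt/EndsAt, then fingerprint
	fmt.Println(alerts.Status())
}
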
diff --git a/vendor/github.com/prometheus/common/model/alert_test.go b/vendor/github.com/prometheus/common/model/alert_test.go
new file mode 100644
index 000000000..9692bca21
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert_test.go
@@ -0,0 +1,118 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestAlertValidate(t *testing.T) {
+ ts := time.Now()
+
+ var cases = []struct {
+ alert *Alert
+ err string
+ }{
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ },
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ },
+ err: "start time missing",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ EndsAt: ts,
+ },
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ EndsAt: ts.Add(1 * time.Minute),
+ },
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ EndsAt: ts.Add(-1 * time.Minute),
+ },
+ err: "start time must be before end time",
+ },
+ {
+ alert: &Alert{
+ StartsAt: ts,
+ },
+ err: "at least one label pair required",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b", "!bad": "label"},
+ StartsAt: ts,
+ },
+ err: "invalid label set: invalid name",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b", "bad": "\xfflabel"},
+ StartsAt: ts,
+ },
+ err: "invalid label set: invalid value",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ Annotations: LabelSet{"!bad": "label"},
+ StartsAt: ts,
+ },
+ err: "invalid annotations: invalid name",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ Annotations: LabelSet{"bad": "\xfflabel"},
+ StartsAt: ts,
+ },
+ err: "invalid annotations: invalid value",
+ },
+ }
+
+ for i, c := range cases {
+ err := c.alert.Validate()
+ if err == nil {
+ if c.err == "" {
+ continue
+ }
+ t.Errorf("%d. Expected error %q but got none", i, c.err)
+ continue
+ }
+ if c.err == "" && err != nil {
+ t.Errorf("%d. Expected no error but got %q", i, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), c.err) {
+ t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 000000000..fc4de4106
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 000000000..038fc1c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
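
Since these helpers are unexported, the following sketch is written as if it belonged to package model; it only demonstrates that hashNew/hashAdd compute the same FNV-1a value as the standard library.

package model

import "hash/fnv"

// fnvMatchesStdlib reports whether the inlined FNV-1a helpers above agree with
// hash/fnv for a given string. It exists purely for illustration.
func fnvMatchesStdlib(s string) bool {
	h := hashAdd(hashNew(), s)

	std := fnv.New64a()
	std.Write([]byte(s))
	return h == std.Sum64()
}
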
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 000000000..41051a01a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
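
A short sketch (not part of the vendored file) exercising LabelName.IsValid and the sort.Interface implementation of LabelNames; the example names are arbitrary.

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	for _, ln := range []model.LabelName{"job", "__name__", "0bad", "has space"} {
		fmt.Printf("%-10s valid=%v\n", ln, ln.IsValid()) // last two are invalid
	}

	names := model.LabelNames{"zone", "instance", "job"}
	sort.Sort(names)
	fmt.Println(names) // instance, job, zone
}
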
diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go
new file mode 100644
index 000000000..e8df28ffa
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels_test.go
@@ -0,0 +1,140 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+ "testing"
+)
+
+func testLabelNames(t testing.TB) {
+ var scenarios = []struct {
+ in LabelNames
+ out LabelNames
+ }{
+ {
+ in: LabelNames{"ZZZ", "zzz"},
+ out: LabelNames{"ZZZ", "zzz"},
+ },
+ {
+ in: LabelNames{"aaa", "AAA"},
+ out: LabelNames{"AAA", "aaa"},
+ },
+ }
+
+ for i, scenario := range scenarios {
+ sort.Sort(scenario.in)
+
+ for j, expected := range scenario.out {
+ if expected != scenario.in[j] {
+ t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
+ }
+ }
+ }
+}
+
+func TestLabelNames(t *testing.T) {
+ testLabelNames(t)
+}
+
+func BenchmarkLabelNames(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testLabelNames(b)
+ }
+}
+
+func testLabelValues(t testing.TB) {
+ var scenarios = []struct {
+ in LabelValues
+ out LabelValues
+ }{
+ {
+ in: LabelValues{"ZZZ", "zzz"},
+ out: LabelValues{"ZZZ", "zzz"},
+ },
+ {
+ in: LabelValues{"aaa", "AAA"},
+ out: LabelValues{"AAA", "aaa"},
+ },
+ }
+
+ for i, scenario := range scenarios {
+ sort.Sort(scenario.in)
+
+ for j, expected := range scenario.out {
+ if expected != scenario.in[j] {
+ t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
+ }
+ }
+ }
+}
+
+func TestLabelValues(t *testing.T) {
+ testLabelValues(t)
+}
+
+func BenchmarkLabelValues(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testLabelValues(b)
+ }
+}
+
+func TestLabelNameIsValid(t *testing.T) {
+ var scenarios = []struct {
+ ln LabelName
+ valid bool
+ }{
+ {
+ ln: "Avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "_Avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "1valid_23name",
+ valid: false,
+ },
+ {
+ ln: "avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "Ava:lid_23name",
+ valid: false,
+ },
+ {
+ ln: "a lid_23name",
+ valid: false,
+ },
+ {
+ ln: ":leading_colon",
+ valid: false,
+ },
+ {
+ ln: "colon:in:the:middle",
+ valid: false,
+ },
+ }
+
+ for _, s := range scenarios {
+ if s.ln.IsValid() != s.valid {
+ t.Errorf("Expected %v for %q using IsValid method", s.valid, s.ln)
+ }
+ if LabelNameRE.MatchString(string(s.ln)) != s.valid {
+ t.Errorf("Expected %v for %q using regexp match", s.valid, s.ln)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 000000000..6eda08a73
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. It may be
+// fully qualified, in which case it resolves to a single Metric in the data
+// store, or only partially specified, in which case it may match a whole
+// vector of Metrics.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
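
An illustrative sketch, not part of the vendored code, showing how label sets merge and compare; the label values are hypothetical.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "env": "prod"}
	extra := model.LabelSet{"env": "staging", "zone": "eu"}

	merged := base.Merge(extra) // "env" from extra wins; base stays untouched
	fmt.Println(merged)         // {env="staging", job="api", zone="eu"}

	fmt.Println(base.Equal(merged))  // false
	fmt.Println(base.Before(merged)) // true: fewer labels sorts first
}
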
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 000000000..9dff899cb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,103 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
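
A brief sketch, separate from the vendored file, rendering a Metric and checking metric-name validity; the metric and labels are made up.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
		"code":                "200",
	}
	fmt.Println(m)               // http_requests_total{code="200", method="GET"}
	fmt.Println(m.Fingerprint()) // prints as a 16-character hex string

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("2xx_rate"))            // false: leading digit
}
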
diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go
new file mode 100644
index 000000000..06f9de525
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric_test.go
@@ -0,0 +1,132 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import "testing"
+
+func testMetric(t testing.TB) {
+ var scenarios = []struct {
+ input LabelSet
+ fingerprint Fingerprint
+ fastFingerprint Fingerprint
+ }{
+ {
+ input: LabelSet{},
+ fingerprint: 14695981039346656037,
+ fastFingerprint: 14695981039346656037,
+ },
+ {
+ input: LabelSet{
+ "first_name": "electro",
+ "occupation": "robot",
+ "manufacturer": "westinghouse",
+ },
+ fingerprint: 5911716720268894962,
+ fastFingerprint: 11310079640881077873,
+ },
+ {
+ input: LabelSet{
+ "x": "y",
+ },
+ fingerprint: 8241431561484471700,
+ fastFingerprint: 13948396922932177635,
+ },
+ {
+ input: LabelSet{
+ "a": "bb",
+ "b": "c",
+ },
+ fingerprint: 3016285359649981711,
+ fastFingerprint: 3198632812309449502,
+ },
+ {
+ input: LabelSet{
+ "a": "b",
+ "bb": "c",
+ },
+ fingerprint: 7122421792099404749,
+ fastFingerprint: 5774953389407657638,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ input := Metric(scenario.input)
+
+ if scenario.fingerprint != input.Fingerprint() {
+ t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint())
+ }
+ if scenario.fastFingerprint != input.FastFingerprint() {
+ t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint())
+ }
+ }
+}
+
+func TestMetric(t *testing.T) {
+ testMetric(t)
+}
+
+func BenchmarkMetric(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testMetric(b)
+ }
+}
+
+func TestMetricNameIsValid(t *testing.T) {
+ var scenarios = []struct {
+ mn LabelValue
+ valid bool
+ }{
+ {
+ mn: "Avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "_Avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "1valid_23name",
+ valid: false,
+ },
+ {
+ mn: "avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "Ava:lid_23name",
+ valid: true,
+ },
+ {
+ mn: "a lid_23name",
+ valid: false,
+ },
+ {
+ mn: ":leading_colon",
+ valid: true,
+ },
+ {
+ mn: "colon:in:the:middle",
+ valid: true,
+ },
+ }
+
+ for _, s := range scenarios {
+ if IsValidMetricName(s.mn) != s.valid {
+ t.Errorf("Expected %v for %q using IsValidMetricName function", s.valid, s.mn)
+ }
+ if MetricNameRE.MatchString(string(s.mn)) != s.valid {
+ t.Errorf("Expected %v for %q using regexp matching", s.valid, s.mn)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 000000000..a7b969170
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 000000000..8762b13c6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster and less allocation-heavy hash function, which is more prone to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
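
A sketch of the exported signature helpers in use, not part of the vendored code; the label values are hypothetical, and 14695981039346656037 is simply the FNV-1a offset basis cached above as emptyLabelSignature.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelsToSignature(nil)) // 14695981039346656037

	labels := map[string]string{"job": "api", "instance": "host:9090"}
	fmt.Println(model.LabelsToSignature(labels))

	m := model.Metric{"job": "api", "instance": "host:9090", "env": "prod"}
	// Only "instance" and "job" contribute; "env" is ignored.
	fmt.Println(model.SignatureForLabels(m, "instance", "job"))
}
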
diff --git a/vendor/github.com/prometheus/common/model/signature_test.go b/vendor/github.com/prometheus/common/model/signature_test.go
new file mode 100644
index 000000000..d59c8a8c3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature_test.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+func TestLabelsToSignature(t *testing.T) {
+ var scenarios = []struct {
+ in map[string]string
+ out uint64
+ }{
+ {
+ in: map[string]string{},
+ out: 14695981039346656037,
+ },
+ {
+ in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := LabelsToSignature(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestMetricToFingerprint(t *testing.T) {
+ var scenarios = []struct {
+ in LabelSet
+ out Fingerprint
+ }{
+ {
+ in: LabelSet{},
+ out: 14695981039346656037,
+ },
+ {
+ in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := labelSetToFingerprint(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestMetricToFastFingerprint(t *testing.T) {
+ var scenarios = []struct {
+ in LabelSet
+ out Fingerprint
+ }{
+ {
+ in: LabelSet{},
+ out: 14695981039346656037,
+ },
+ {
+ in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 12952432476264840823,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := labelSetToFastFingerprint(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestSignatureForLabels(t *testing.T) {
+ var scenarios = []struct {
+ in Metric
+ labels LabelNames
+ out uint64
+ }{
+ {
+ in: Metric{},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{},
+ labels: LabelNames{"empty"},
+ out: 7187873163539638612,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{"empty"},
+ out: 7187873163539638612,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{"fear", "name"},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
+ labels: LabelNames{"fear", "name"},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{},
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := SignatureForLabels(scenario.in, scenario.labels...)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestSignatureWithoutLabels(t *testing.T) {
+ var scenarios = []struct {
+ in Metric
+ labels map[LabelName]struct{}
+ out uint64
+ }{
+ {
+ in: Metric{},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}},
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
+ labels: map[LabelName]struct{}{"foo": struct{}{}},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: map[LabelName]struct{}{},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: nil,
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := SignatureWithoutLabels(scenario.in, scenario.labels)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
+ for i := 0; i < b.N; i++ {
+ if a := LabelsToSignature(l); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
+ }
+ }
+}
+
+func BenchmarkLabelToSignatureScalar(b *testing.B) {
+ benchmarkLabelToSignature(b, nil, 14695981039346656037)
+}
+
+func BenchmarkLabelToSignatureSingle(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169)
+}
+
+func BenchmarkLabelToSignatureDouble(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
+}
+
+func BenchmarkLabelToSignatureTriple(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
+}
+
+func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
+ for i := 0; i < b.N; i++ {
+ if a := labelSetToFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+}
+
+func BenchmarkMetricToFingerprintScalar(b *testing.B) {
+ benchmarkMetricToFingerprint(b, nil, 14695981039346656037)
+}
+
+func BenchmarkMetricToFingerprintSingle(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169)
+}
+
+func BenchmarkMetricToFingerprintDouble(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
+}
+
+func BenchmarkMetricToFingerprintTriple(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
+}
+
+func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
+ for i := 0; i < b.N; i++ {
+ if a := labelSetToFastFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+}
+
+func BenchmarkMetricToFastFingerprintScalar(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)
+}
+
+func BenchmarkMetricToFastFingerprintSingle(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964)
+}
+
+func BenchmarkMetricToFastFingerprintDouble(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
+}
+
+func BenchmarkMetricToFastFingerprintTriple(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
+}
+
+func BenchmarkEmptyLabelSignature(b *testing.B) {
+ input := []map[string]string{nil, {}}
+
+ var ms runtime.MemStats
+ runtime.ReadMemStats(&ms)
+
+ alloc := ms.Alloc
+
+ for _, labels := range input {
+ LabelsToSignature(labels)
+ }
+
+ runtime.ReadMemStats(&ms)
+
+ if got := ms.Alloc; alloc != got {
+ b.Fatal("expected LabelsToSignature with empty labels not to perform allocations")
+ }
+}
+
+func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) {
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ for i := 0; i < concLevel; i++ {
+ go func() {
+ start.Wait()
+ for j := b.N / concLevel; j >= 0; j-- {
+ if a := labelSetToFastFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+ end.Done()
+ }()
+ }
+ b.ResetTimer()
+ start.Done()
+ end.Wait()
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..7538e2997
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes a matcher that matches the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
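A minimal usage sketch for the type above, showing a Silence that passes every check in Validate; all values are illustrative:

package main

import (
	"log"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	sil := &model.Silence{
		Matchers: []*model.Matcher{
			{Name: "job", Value: "node"},
			{Name: "instance", Value: "server-[0-9]+", IsRegex: true},
		},
		StartsAt:  now,
		EndsAt:    now.Add(2 * time.Hour),
		CreatedAt: now,
		CreatedBy: "ops@example.org",
		Comment:   "planned maintenance",
	}
	if err := sil.Validate(); err != nil {
		log.Fatalf("invalid silence: %s", err)
	}
}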
diff --git a/vendor/github.com/prometheus/common/model/silence_test.go b/vendor/github.com/prometheus/common/model/silence_test.go
new file mode 100644
index 000000000..8eaaf0744
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence_test.go
@@ -0,0 +1,228 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestMatcherValidate(t *testing.T) {
+ var cases = []struct {
+ matcher *Matcher
+ err string
+ }{
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "value",
+ },
+ },
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "value",
+ IsRegex: true,
+ },
+ },
+ {
+ matcher: &Matcher{
+ Name: "name!",
+ Value: "value",
+ },
+ err: "invalid name",
+ },
+ {
+ matcher: &Matcher{
+ Name: "",
+ Value: "value",
+ },
+ err: "invalid name",
+ },
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "value\xff",
+ },
+ err: "invalid value",
+ },
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "",
+ },
+ err: "invalid value",
+ },
+ }
+
+ for i, c := range cases {
+ err := c.matcher.Validate()
+ if err == nil {
+ if c.err == "" {
+ continue
+ }
+ t.Errorf("%d. Expected error %q but got none", i, c.err)
+ continue
+ }
+ if c.err == "" && err != nil {
+ t.Errorf("%d. Expected no error but got %q", i, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), c.err) {
+ t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+ }
+ }
+}
+
+func TestSilenceValidate(t *testing.T) {
+ ts := time.Now()
+
+ var cases = []struct {
+ sil *Silence
+ err string
+ }{
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ {Name: "name", Value: "value"},
+ {Name: "name", Value: "value"},
+ {Name: "name", Value: "value", IsRegex: true},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts.Add(-1 * time.Minute),
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "start time must be before end time",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "end time missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "start time missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "!name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "invalid matcher",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ },
+ err: "comment missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "creation timestamp missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ Comment: "comment",
+ },
+ err: "creator information missing",
+ },
+ }
+
+ for i, c := range cases {
+ err := c.sil.Validate()
+ if err == nil {
+ if c.err == "" {
+ continue
+ }
+ t.Errorf("%d. Expected error %q but got none", i, c.err)
+ continue
+ }
+ if c.err == "" && err != nil {
+ t.Errorf("%d. Expected no error but got %q", i, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), c.err) {
+ t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..548968aeb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ // minimumTick is the minimum supported time resolution. This has to be
+ // at most time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
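A minimal sketch of the parsing and conversion behaviour defined above; the input values are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration only accepts the single-unit form matched by durationRE,
	// e.g. "90s" parses but "1h30m" does not.
	d, err := model.ParseDuration("5m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // "5m", via Duration.String

	// model.Time has millisecond resolution; sub-millisecond precision is
	// truncated when converting from nanoseconds.
	ts := model.TimeFromUnixNano(1136239445123456789)
	fmt.Println(ts.Time().UTC()) // 2006-01-02 22:04:05.123 +0000 UTC
	fmt.Println(ts.UnixNano())   // 1136239445123000000
}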
diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go
new file mode 100644
index 000000000..45ffd872d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time_test.go
@@ -0,0 +1,129 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "testing"
+ "time"
+)
+
+func TestComparators(t *testing.T) {
+ t1a := TimeFromUnix(0)
+ t1b := TimeFromUnix(0)
+ t2 := TimeFromUnix(2*second - 1)
+
+ if !t1a.Equal(t1b) {
+ t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
+ }
+ if t1a.Equal(t2) {
+ t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
+ }
+
+ if !t1a.Before(t2) {
+ t.Fatalf("Expected %s to be before %s", t1a, t2)
+ }
+ if t1a.Before(t1b) {
+ t.Fatalf("Expected %s to not be before %s", t1a, t1b)
+ }
+
+ if !t2.After(t1a) {
+ t.Fatalf("Expected %s to be after %s", t2, t1a)
+ }
+ if t1b.After(t1a) {
+ t.Fatalf("Expected %s to not be after %s", t1b, t1a)
+ }
+}
+
+func TestTimeConversions(t *testing.T) {
+ unixSecs := int64(1136239445)
+ unixNsecs := int64(123456789)
+ unixNano := unixSecs*1e9 + unixNsecs
+
+ t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
+ t2 := time.Unix(unixSecs, unixNsecs)
+
+ ts := TimeFromUnixNano(unixNano)
+ if !ts.Time().Equal(t1) {
+ t.Fatalf("Expected %s, got %s", t1, ts.Time())
+ }
+
+ // Test available precision.
+ ts = TimeFromUnixNano(t2.UnixNano())
+ if !ts.Time().Equal(t1) {
+ t.Fatalf("Expected %s, got %s", t1, ts.Time())
+ }
+
+ if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
+ t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano())
+ }
+}
+
+func TestDuration(t *testing.T) {
+ duration := time.Second + time.Minute + time.Hour
+ goTime := time.Unix(1136239445, 0)
+
+ ts := TimeFromUnix(goTime.Unix())
+ if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
+ t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
+ }
+
+ earlier := ts.Add(-duration)
+ delta := ts.Sub(earlier)
+ if delta != duration {
+ t.Fatalf("Expected %s to be equal to %s", delta, duration)
+ }
+}
+
+func TestParseDuration(t *testing.T) {
+ var cases = []struct {
+ in string
+ out time.Duration
+ }{
+ {
+ in: "324ms",
+ out: 324 * time.Millisecond,
+ }, {
+ in: "3s",
+ out: 3 * time.Second,
+ }, {
+ in: "5m",
+ out: 5 * time.Minute,
+ }, {
+ in: "1h",
+ out: time.Hour,
+ }, {
+ in: "4d",
+ out: 4 * 24 * time.Hour,
+ }, {
+ in: "3w",
+ out: 3 * 7 * 24 * time.Hour,
+ }, {
+ in: "10y",
+ out: 10 * 365 * 24 * time.Hour,
+ },
+ }
+
+ for _, c := range cases {
+ d, err := ParseDuration(c.in)
+ if err != nil {
+ t.Errorf("Unexpected error on input %q", c.in)
+ }
+ if time.Duration(d) != c.out {
+ t.Errorf("Expected %v but got %v", c.out, d)
+ }
+ if d.String() != c.in {
+ t.Errorf("Expected duration string %q but got %q", c.in, d.String())
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 000000000..7728abaee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,419 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+ // and metric nil. Note that the natural zero value of Sample has a timestamp
+ // of 0, which is possible to appear in a real Sample and thus not suitable
+ // to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+ if !s.Value.Equal(o.Value) {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
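A minimal sketch of consuming these types; sample data is illustrative. The Value interface is typically handled with a switch on Type, and Samples marshal as [timestamp, "value"] pairs as implemented above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

// printValue shows the usual type switch over a query result.
func printValue(v model.Value) {
	switch v.Type() {
	case model.ValVector:
		vec := v.(model.Vector)
		fmt.Printf("vector, %d samples:\n%s\n", len(vec), vec)
	case model.ValScalar:
		fmt.Println("scalar:", v.(*model.Scalar))
	default:
		fmt.Println("other:", v)
	}
}

func main() {
	vec := model.Vector{
		&model.Sample{
			Metric:    model.Metric{model.MetricNameLabel: "up", "job": "api"},
			Value:     1,
			Timestamp: model.Now(),
		},
	}
	printValue(vec)

	// Encodes as [{"metric":{"__name__":"up","job":"api"},"value":[<ts>,"1"]}].
	b, _ := json.Marshal(vec)
	fmt.Println(string(b))
}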
diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go
new file mode 100644
index 000000000..8d2b69ea1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_test.go
@@ -0,0 +1,417 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "math"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestEqual(t *testing.T) {
+ tests := map[string]struct {
+ in1, in2 SampleValue
+ want bool
+ }{
+ "equal floats": {
+ in1: 3.14,
+ in2: 3.14,
+ want: true,
+ },
+ "unequal floats": {
+ in1: 3.14,
+ in2: 3.1415,
+ want: false,
+ },
+ "positive inifinities": {
+ in1: SampleValue(math.Inf(+1)),
+ in2: SampleValue(math.Inf(+1)),
+ want: true,
+ },
+ "negative inifinities": {
+ in1: SampleValue(math.Inf(-1)),
+ in2: SampleValue(math.Inf(-1)),
+ want: true,
+ },
+ "different inifinities": {
+ in1: SampleValue(math.Inf(+1)),
+ in2: SampleValue(math.Inf(-1)),
+ want: false,
+ },
+ "number and infinity": {
+ in1: 42,
+ in2: SampleValue(math.Inf(+1)),
+ want: false,
+ },
+ "number and NaN": {
+ in1: 42,
+ in2: SampleValue(math.NaN()),
+ want: false,
+ },
+ "NaNs": {
+ in1: SampleValue(math.NaN()),
+ in2: SampleValue(math.NaN()),
+ want: true, // !!!
+ },
+ }
+
+ for name, test := range tests {
+ got := test.in1.Equal(test.in2)
+ if got != test.want {
+ t.Errorf("Comparing %s, %f and %f: got %t, want %t", name, test.in1, test.in2, got, test.want)
+ }
+ }
+}
+
+func TestSamplePairJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value SamplePair
+ }{
+ {
+ plain: `[1234.567,"123.1"]`,
+ value: SamplePair{
+ Value: 123.1,
+ Timestamp: 1234567,
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sp SamplePair
+ err = json.Unmarshal(b, &sp)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if sp != test.value {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sp)
+ }
+ }
+}
+
+func TestSampleJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value Sample
+ }{
+ {
+ plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`,
+ value: Sample{
+ Metric: Metric{
+ MetricNameLabel: "test_metric",
+ },
+ Value: 123.1,
+ Timestamp: 1234567,
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sv Sample
+ err = json.Unmarshal(b, &sv)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if !reflect.DeepEqual(sv, test.value) {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+ }
+ }
+}
+
+func TestVectorJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value Vector
+ }{
+ {
+ plain: `[]`,
+ value: Vector{},
+ },
+ {
+ plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`,
+ value: Vector{&Sample{
+ Metric: Metric{
+ MetricNameLabel: "test_metric",
+ },
+ Value: 123.1,
+ Timestamp: 1234567,
+ }},
+ },
+ {
+ plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`,
+ value: Vector{
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "test_metric",
+ },
+ Value: 123.1,
+ Timestamp: 1234567,
+ },
+ &Sample{
+ Metric: Metric{
+ "foo": "bar",
+ },
+ Value: SampleValue(math.Inf(1)),
+ Timestamp: 1234,
+ },
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var vec Vector
+ err = json.Unmarshal(b, &vec)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if !reflect.DeepEqual(vec, test.value) {
+ t.Errorf("decoding error: expected %v, got %v", test.value, vec)
+ }
+ }
+}
+
+func TestScalarJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value Scalar
+ }{
+ {
+ plain: `[123.456,"456"]`,
+ value: Scalar{
+ Timestamp: 123456,
+ Value: 456,
+ },
+ },
+ {
+ plain: `[123123.456,"+Inf"]`,
+ value: Scalar{
+ Timestamp: 123123456,
+ Value: SampleValue(math.Inf(1)),
+ },
+ },
+ {
+ plain: `[123123.456,"-Inf"]`,
+ value: Scalar{
+ Timestamp: 123123456,
+ Value: SampleValue(math.Inf(-1)),
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sv Scalar
+ err = json.Unmarshal(b, &sv)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if sv != test.value {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+ }
+ }
+}
+
+func TestStringJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value String
+ }{
+ {
+ plain: `[123.456,"test"]`,
+ value: String{
+ Timestamp: 123456,
+ Value: "test",
+ },
+ },
+ {
+ plain: `[123123.456,"台北"]`,
+ value: String{
+ Timestamp: 123123456,
+ Value: "台北",
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sv String
+ err = json.Unmarshal(b, &sv)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if sv != test.value {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+ }
+ }
+}
+
+func TestVectorSort(t *testing.T) {
+ input := Vector{
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 2,
+ },
+ }
+
+ expected := Vector{
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 2,
+ },
+ }
+
+ sort.Sort(input)
+
+ for i, actual := range input {
+ actualFp := actual.Metric.Fingerprint()
+ expectedFp := expected[i].Metric.Fingerprint()
+
+ if actualFp != expectedFp {
+ t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String())
+ }
+
+ if actual.Timestamp != expected[i].Timestamp {
+ t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go
new file mode 100644
index 000000000..930b52d4f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/route/route.go
@@ -0,0 +1,137 @@
+package route
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "github.com/julienschmidt/httprouter"
+ "golang.org/x/net/context"
+)
+
+var (
+ mtx = sync.RWMutex{}
+ ctxts = map[*http.Request]context.Context{}
+)
+
+// Context returns the context for the request.
+func Context(r *http.Request) context.Context {
+ mtx.RLock()
+ defer mtx.RUnlock()
+ return ctxts[r]
+}
+
+type param string
+
+// Param returns param p for the context.
+func Param(ctx context.Context, p string) string {
+ return ctx.Value(param(p)).(string)
+}
+
+// WithParam returns a new context with param p set to v.
+func WithParam(ctx context.Context, p, v string) context.Context {
+ return context.WithValue(ctx, param(p), v)
+}
+
+type contextFn func(r *http.Request) (context.Context, error)
+
+// Router wraps httprouter.Router and adds support for prefixed sub-routers
+// and per-request context injections.
+type Router struct {
+ rtr *httprouter.Router
+ prefix string
+ ctxFn contextFn
+}
+
+// New returns a new Router.
+func New(ctxFn contextFn) *Router {
+ if ctxFn == nil {
+ ctxFn = func(r *http.Request) (context.Context, error) {
+ return context.Background(), nil
+ }
+ }
+ return &Router{
+ rtr: httprouter.New(),
+ ctxFn: ctxFn,
+ }
+}
+
+// WithPrefix returns a router that prefixes all registered routes with prefix.
+func (r *Router) WithPrefix(prefix string) *Router {
+ return &Router{rtr: r.rtr, prefix: r.prefix + prefix, ctxFn: r.ctxFn}
+}
+
+// handle turns a HandlerFunc into an httprouter.Handle.
+func (r *Router) handle(h http.HandlerFunc) httprouter.Handle {
+ return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
+ reqCtx, err := r.ctxFn(req)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Error creating request context: %v", err), http.StatusBadRequest)
+ return
+ }
+ ctx, cancel := context.WithCancel(reqCtx)
+ defer cancel()
+
+ for _, p := range params {
+ ctx = context.WithValue(ctx, param(p.Key), p.Value)
+ }
+
+ mtx.Lock()
+ ctxts[req] = ctx
+ mtx.Unlock()
+
+ h(w, req)
+
+ mtx.Lock()
+ delete(ctxts, req)
+ mtx.Unlock()
+ }
+}
+
+// Get registers a new GET route.
+func (r *Router) Get(path string, h http.HandlerFunc) {
+ r.rtr.GET(r.prefix+path, r.handle(h))
+}
+
+// Options registers a new OPTIONS route.
+func (r *Router) Options(path string, h http.HandlerFunc) {
+ r.rtr.OPTIONS(r.prefix+path, r.handle(h))
+}
+
+// Del registers a new DELETE route.
+func (r *Router) Del(path string, h http.HandlerFunc) {
+ r.rtr.DELETE(r.prefix+path, r.handle(h))
+}
+
+// Put registers a new PUT route.
+func (r *Router) Put(path string, h http.HandlerFunc) {
+ r.rtr.PUT(r.prefix+path, r.handle(h))
+}
+
+// Post registers a new POST route.
+func (r *Router) Post(path string, h http.HandlerFunc) {
+ r.rtr.POST(r.prefix+path, r.handle(h))
+}
+
+// Redirect takes an absolute path and sends an internal HTTP redirect for it,
+// prefixed by the router's path prefix. Note that this method does not include
+// functionality for handling relative paths or full URL redirects.
+func (r *Router) Redirect(w http.ResponseWriter, req *http.Request, path string, code int) {
+ http.Redirect(w, req, r.prefix+path, code)
+}
+
+// ServeHTTP implements http.Handler.
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ r.rtr.ServeHTTP(w, req)
+}
+
+// FileServe returns a new http.HandlerFunc that serves files from dir.
+// Routes using it must provide the *filepath parameter.
+func FileServe(dir string) http.HandlerFunc {
+ fs := http.FileServer(http.Dir(dir))
+
+ return func(w http.ResponseWriter, r *http.Request) {
+ r.URL.Path = Param(Context(r), "filepath")
+ fs.ServeHTTP(w, r)
+ }
+}
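A minimal sketch of wiring the router above; handler bodies, paths, and the listen address are illustrative. A nil ctxFn falls back to context.Background as shown in New:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/common/route"
)

func main() {
	r := route.New(nil)

	// Registered paths are prefixed, and httprouter path parameters are
	// exposed via the per-request context.
	api := r.WithPrefix("/api/v1")
	api.Get("/targets/:name", func(w http.ResponseWriter, req *http.Request) {
		name := route.Param(route.Context(req), "name")
		fmt.Fprintf(w, "target %s\n", name)
	})

	// FileServe requires a catch-all *filepath parameter on the route.
	r.Get("/static/*filepath", route.FileServe("./static"))

	log.Fatal(http.ListenAndServe(":8080", r))
}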
diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go
new file mode 100644
index 000000000..4055d69d5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/route/route_test.go
@@ -0,0 +1,75 @@
+package route
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestRedirect(t *testing.T) {
+ router := New(nil).WithPrefix("/test/prefix")
+ w := httptest.NewRecorder()
+ r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil)
+ if err != nil {
+ t.Fatalf("Error building test request: %s", err)
+ }
+
+ router.Redirect(w, r, "/some/endpoint", http.StatusFound)
+ if w.Code != http.StatusFound {
+ t.Fatalf("Unexpected redirect status code: got %d, want %d", w.Code, http.StatusFound)
+ }
+
+ want := "/test/prefix/some/endpoint"
+ got := w.Header()["Location"][0]
+ if want != got {
+ t.Fatalf("Unexpected redirect location: got %s, want %s", got, want)
+ }
+}
+
+func TestContextFn(t *testing.T) {
+ router := New(func(r *http.Request) (context.Context, error) {
+ return context.WithValue(context.Background(), "testkey", "testvalue"), nil
+ })
+
+ router.Get("/test", func(w http.ResponseWriter, r *http.Request) {
+ want := "testvalue"
+ got := Context(r).Value("testkey")
+ if want != got {
+ t.Fatalf("Unexpected context value: want %q, got %q", want, got)
+ }
+ })
+
+ r, err := http.NewRequest("GET", "http://localhost:9090/test", nil)
+ if err != nil {
+ t.Fatalf("Error building test request: %s", err)
+ }
+ router.ServeHTTP(nil, r)
+}
+
+func TestContextFnError(t *testing.T) {
+ router := New(func(r *http.Request) (context.Context, error) {
+ return context.Background(), fmt.Errorf("test error")
+ })
+
+ router.Get("/test", func(w http.ResponseWriter, r *http.Request) {})
+
+ r, err := http.NewRequest("GET", "http://localhost:9090/test", nil)
+ if err != nil {
+ t.Fatalf("Error building test request: %s", err)
+ }
+ w := httptest.NewRecorder()
+ router.ServeHTTP(w, r)
+
+ if w.Code != http.StatusBadRequest {
+ t.Fatalf("Unexpected response status: got %q, want %q", w.Code, http.StatusBadRequest)
+ }
+
+ want := "Error creating request context: test error\n"
+ got := w.Body.String()
+ if want != got {
+ t.Fatalf("Unexpected response body: got %q, want %q", got, want)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
new file mode 100644
index 000000000..84489a510
--- /dev/null
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -0,0 +1,89 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strings"
+ "text/template"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Build information. Populated at build-time.
+var (
+ Version string
+ Revision string
+ Branch string
+ BuildUser string
+ BuildDate string
+ GoVersion = runtime.Version()
+)
+
+// NewCollector returns a collector which exports metrics about current version information.
+func NewCollector(program string) *prometheus.GaugeVec {
+ buildInfo := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: program,
+ Name: "build_info",
+ Help: fmt.Sprintf(
+ "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
+ program,
+ ),
+ },
+ []string{"version", "revision", "branch", "goversion"},
+ )
+ buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1)
+ return buildInfo
+}
+
+// versionInfoTmpl contains the template used by Print.
+var versionInfoTmpl = `
+{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
+ build user: {{.buildUser}}
+ build date: {{.buildDate}}
+ go version: {{.goVersion}}
+`
+
+// Print returns version information.
+func Print(program string) string {
+ m := map[string]string{
+ "program": program,
+ "version": Version,
+ "revision": Revision,
+ "branch": Branch,
+ "buildUser": BuildUser,
+ "buildDate": BuildDate,
+ "goVersion": GoVersion,
+ }
+ t := template.Must(template.New("version").Parse(versionInfoTmpl))
+
+ var buf bytes.Buffer
+ if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
+ panic(err)
+ }
+ return strings.TrimSpace(buf.String())
+}
+
+// Info returns version, branch and revision information.
+func Info() string {
+ return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
+}
+
+// BuildContext returns goVersion, buildUser and buildDate information.
+func BuildContext() string {
+ return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
+}
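A minimal sketch of how a program typically uses this package; the program name is illustrative, and the build variables are normally injected at link time (for example via -ldflags, as assumed in the comment below):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/version"
)

func main() {
	// Version, Revision, Branch, BuildUser and BuildDate are usually set at
	// build time, e.g.:
	//   go build -ldflags "-X github.com/prometheus/common/version.Version=1.2.3"
	prometheus.MustRegister(version.NewCollector("myprog"))

	fmt.Println(version.Print("myprog"))
	fmt.Println("myprog", version.Info(), version.BuildContext())
}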