summaryrefslogtreecommitdiffstats
path: root/vendor/github.com/prometheus
diff options
context:
space:
mode:
authorCorey Hulen <corey@hulen.com>2016-11-22 11:05:54 -0800
committerHarrison Healey <harrisonmhealey@gmail.com>2016-11-22 14:05:54 -0500
commit7961599b2e41c71720a42b3bfde641f7529f05fe (patch)
tree3c039e1d3790a954ba65fe551c7b348331bce994 /vendor/github.com/prometheus
parente033dcce8e57ed6b6684227adf9b29347e4718b3 (diff)
downloadchat-7961599b2e41c71720a42b3bfde641f7529f05fe.tar.gz
chat-7961599b2e41c71720a42b3bfde641f7529f05fe.tar.bz2
chat-7961599b2e41c71720a42b3bfde641f7529f05fe.zip
PLT-4357 adding performance monitoring (#4622)
* WIP * WIP * Adding metrics collection * updating vendor packages * Adding metrics to config * Adding admin console page for perf monitoring * Updating glide * switching to tylerb/graceful
Diffstat (limited to 'vendor/github.com/prometheus')
-rw-r--r--vendor/github.com/prometheus/client_golang/.gitignore26
-rw-r--r--vendor/github.com/prometheus/client_golang/.travis.yml9
-rw-r--r--vendor/github.com/prometheus/client_golang/AUTHORS.md18
-rw-r--r--vendor/github.com/prometheus/client_golang/CHANGELOG.md109
-rw-r--r--vendor/github.com/prometheus/client_golang/CONTRIBUTING.md18
-rw-r--r--vendor/github.com/prometheus/client_golang/LICENSE201
-rw-r--r--vendor/github.com/prometheus/client_golang/NOTICE23
-rw-r--r--vendor/github.com/prometheus/client_golang/README.md45
-rw-r--r--vendor/github.com/prometheus/client_golang/VERSION1
-rw-r--r--vendor/github.com/prometheus/client_golang/api/prometheus/api.go345
-rw-r--r--vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go453
-rw-r--r--vendor/github.com/prometheus/client_golang/examples/random/main.go103
-rw-r--r--vendor/github.com/prometheus/client_golang/examples/simple/main.go30
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/.gitignore1
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/README.md1
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go183
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/collector.go75
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/counter.go172
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/counter_test.go58
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/desc.go205
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/doc.go181
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go118
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/examples_test.go751
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go119
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go97
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/fnv.go29
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/gauge.go140
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go182
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector.go263
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go123
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/histogram.go444
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go326
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/http.go490
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/http_test.go121
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/metric.go166
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/metric_test.go35
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector.go142
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go58
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go201
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go137
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go56
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/push/push.go172
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go176
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry.go806
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry_test.go545
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/summary.go534
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/summary_test.go347
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/untyped.go138
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value.go234
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec.go404
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/vec_test.go312
-rw-r--r--vendor/github.com/prometheus/client_model/.gitignore1
-rw-r--r--vendor/github.com/prometheus/client_model/AUTHORS.md13
-rw-r--r--vendor/github.com/prometheus/client_model/CONTRIBUTING.md18
-rw-r--r--vendor/github.com/prometheus/client_model/LICENSE201
-rw-r--r--vendor/github.com/prometheus/client_model/Makefile61
-rw-r--r--vendor/github.com/prometheus/client_model/NOTICE5
-rw-r--r--vendor/github.com/prometheus/client_model/README.md26
-rw-r--r--vendor/github.com/prometheus/client_model/cpp/metrics.pb.cc3380
-rw-r--r--vendor/github.com/prometheus/client_model/cpp/metrics.pb.h2072
-rw-r--r--vendor/github.com/prometheus/client_model/go/metrics.pb.go364
-rw-r--r--vendor/github.com/prometheus/client_model/metrics.proto81
-rw-r--r--vendor/github.com/prometheus/client_model/pom.xml130
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/__init__.py12
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py12
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py14
-rw-r--r--vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py575
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/.gitignore5
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/Gemfile4
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/LICENSE201
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/Makefile17
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/README.md31
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/Rakefile1
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb2
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb111
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb7
-rw-r--r--vendor/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec22
-rw-r--r--vendor/github.com/prometheus/client_model/setup.py23
-rw-r--r--vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java7683
-rw-r--r--vendor/github.com/prometheus/common/.travis.yml7
-rw-r--r--vendor/github.com/prometheus/common/AUTHORS.md11
-rw-r--r--vendor/github.com/prometheus/common/CONTRIBUTING.md18
-rw-r--r--vendor/github.com/prometheus/common/LICENSE201
-rw-r--r--vendor/github.com/prometheus/common/NOTICE5
-rw-r--r--vendor/github.com/prometheus/common/README.md12
-rw-r--r--vendor/github.com/prometheus/common/config/config.go30
-rw-r--r--vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml1
-rw-r--r--vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml0
-rw-r--r--vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml1
-rw-r--r--vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml1
-rw-r--r--vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml1
-rw-r--r--vendor/github.com/prometheus/common/config/tls_config.go79
-rw-r--r--vendor/github.com/prometheus/common/config/tls_config_test.go92
-rw-r--r--vendor/github.com/prometheus/common/expfmt/bench_test.go167
-rw-r--r--vendor/github.com/prometheus/common/expfmt/decode.go412
-rw-r--r--vendor/github.com/prometheus/common/expfmt/decode_test.go367
-rw-r--r--vendor/github.com/prometheus/common/expfmt/encode.go88
-rw-r--r--vendor/github.com/prometheus/common/expfmt/expfmt.go37
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz.go36
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_02
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_16
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_212
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_322
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_410
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_01
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_101
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_111
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_123
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_133
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_143
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_152
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_162
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_171
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_181
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_193
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_23
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_31
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_41
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_51
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_61
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_73
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_81
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_91
-rw-r--r--vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal1
-rw-r--r--vendor/github.com/prometheus/common/expfmt/testdata/json246
-rw-r--r--vendor/github.com/prometheus/common/expfmt/testdata/json2_bad46
-rw-r--r--vendor/github.com/prometheus/common/expfmt/testdata/protobufbin0 -> 8239 bytes
-rw-r--r--vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gzbin0 -> 2097 bytes
-rw-r--r--vendor/github.com/prometheus/common/expfmt/testdata/text322
-rw-r--r--vendor/github.com/prometheus/common/expfmt/testdata/text.gzbin0 -> 2598 bytes
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_create.go303
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_create_test.go443
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_parse.go753
-rw-r--r--vendor/github.com/prometheus/common/expfmt/text_parse_test.go588
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt67
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go162
-rw-r--r--vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go33
-rw-r--r--vendor/github.com/prometheus/common/log/eventlog_formatter.go89
-rw-r--r--vendor/github.com/prometheus/common/log/log.go365
-rw-r--r--vendor/github.com/prometheus/common/log/log_test.go39
-rw-r--r--vendor/github.com/prometheus/common/log/syslog_formatter.go119
-rw-r--r--vendor/github.com/prometheus/common/model/alert.go136
-rw-r--r--vendor/github.com/prometheus/common/model/alert_test.go118
-rw-r--r--vendor/github.com/prometheus/common/model/fingerprinting.go105
-rw-r--r--vendor/github.com/prometheus/common/model/fnv.go42
-rw-r--r--vendor/github.com/prometheus/common/model/labels.go210
-rw-r--r--vendor/github.com/prometheus/common/model/labels_test.go140
-rw-r--r--vendor/github.com/prometheus/common/model/labelset.go169
-rw-r--r--vendor/github.com/prometheus/common/model/metric.go103
-rw-r--r--vendor/github.com/prometheus/common/model/metric_test.go132
-rw-r--r--vendor/github.com/prometheus/common/model/model.go16
-rw-r--r--vendor/github.com/prometheus/common/model/signature.go144
-rw-r--r--vendor/github.com/prometheus/common/model/signature_test.go314
-rw-r--r--vendor/github.com/prometheus/common/model/silence.go106
-rw-r--r--vendor/github.com/prometheus/common/model/silence_test.go228
-rw-r--r--vendor/github.com/prometheus/common/model/time.go249
-rw-r--r--vendor/github.com/prometheus/common/model/time_test.go129
-rw-r--r--vendor/github.com/prometheus/common/model/value.go419
-rw-r--r--vendor/github.com/prometheus/common/model/value_test.go417
-rw-r--r--vendor/github.com/prometheus/common/route/route.go137
-rw-r--r--vendor/github.com/prometheus/common/route/route_test.go75
-rw-r--r--vendor/github.com/prometheus/common/version/info.go89
-rw-r--r--vendor/github.com/prometheus/procfs/.travis.yml5
-rw-r--r--vendor/github.com/prometheus/procfs/AUTHORS.md20
-rw-r--r--vendor/github.com/prometheus/procfs/CONTRIBUTING.md18
-rw-r--r--vendor/github.com/prometheus/procfs/LICENSE201
-rw-r--r--vendor/github.com/prometheus/procfs/Makefile6
-rw-r--r--vendor/github.com/prometheus/procfs/NOTICE7
-rw-r--r--vendor/github.com/prometheus/procfs/README.md10
-rw-r--r--vendor/github.com/prometheus/procfs/doc.go45
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26231/cmdlinebin0 -> 16 bytes
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26231/comm1
l---------vendor/github.com/prometheus/procfs/fixtures/26231/exe1
l---------vendor/github.com/prometheus/procfs/fixtures/26231/fd/01
l---------vendor/github.com/prometheus/procfs/fixtures/26231/fd/11
l---------vendor/github.com/prometheus/procfs/fixtures/26231/fd/101
l---------vendor/github.com/prometheus/procfs/fixtures/26231/fd/21
l---------vendor/github.com/prometheus/procfs/fixtures/26231/fd/31
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26231/io7
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26231/limits17
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26231/stat1
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26232/cmdline0
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26232/comm1
l---------vendor/github.com/prometheus/procfs/fixtures/26232/fd/01
l---------vendor/github.com/prometheus/procfs/fixtures/26232/fd/11
l---------vendor/github.com/prometheus/procfs/fixtures/26232/fd/21
l---------vendor/github.com/prometheus/procfs/fixtures/26232/fd/31
l---------vendor/github.com/prometheus/procfs/fixtures/26232/fd/41
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26232/limits17
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/26232/stat1
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/584/stat2
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/mdstat26
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/net/ip_vs14
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats6
l---------vendor/github.com/prometheus/procfs/fixtures/self1
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/stat16
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README2
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/symlinktargets/abc0
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/symlinktargets/def0
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/symlinktargets/ghi0
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/symlinktargets/uvw0
-rw-r--r--vendor/github.com/prometheus/procfs/fixtures/symlinktargets/xyz0
-rw-r--r--vendor/github.com/prometheus/procfs/fs.go33
-rw-r--r--vendor/github.com/prometheus/procfs/fs_test.go13
-rw-r--r--vendor/github.com/prometheus/procfs/ipvs.go224
-rw-r--r--vendor/github.com/prometheus/procfs/ipvs_test.go190
-rw-r--r--vendor/github.com/prometheus/procfs/mdstat.go138
-rw-r--r--vendor/github.com/prometheus/procfs/mdstat_test.go31
-rw-r--r--vendor/github.com/prometheus/procfs/proc.go212
-rw-r--r--vendor/github.com/prometheus/procfs/proc_io.go55
-rw-r--r--vendor/github.com/prometheus/procfs/proc_io_test.go33
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits.go137
-rw-r--r--vendor/github.com/prometheus/procfs/proc_limits_test.go31
-rw-r--r--vendor/github.com/prometheus/procfs/proc_stat.go175
-rw-r--r--vendor/github.com/prometheus/procfs/proc_stat_test.go110
-rw-r--r--vendor/github.com/prometheus/procfs/proc_test.go160
-rw-r--r--vendor/github.com/prometheus/procfs/stat.go56
-rw-r--r--vendor/github.com/prometheus/procfs/stat_test.go14
219 files changed, 35548 insertions, 0 deletions
diff --git a/vendor/github.com/prometheus/client_golang/.gitignore b/vendor/github.com/prometheus/client_golang/.gitignore
new file mode 100644
index 000000000..f6fc2e8eb
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+*~
+*#
+.build
diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml
new file mode 100644
index 000000000..d83f31a59
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/.travis.yml
@@ -0,0 +1,9 @@
+sudo: false
+language: go
+
+go:
+ - 1.5.4
+ - 1.6.2
+
+script:
+ - go test -short ./...
diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md
new file mode 100644
index 000000000..c5275d5ab
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md
@@ -0,0 +1,18 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein <beorn@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Bernerd Schaefer <bj.schaefer@gmail.com>
+* Björn Rabenstein <beorn@soundcloud.com>
+* Daniel Bornkessel <daniel@soundcloud.com>
+* Jeff Younker <jeff@drinktomi.com>
+* Julius Volz <julius.volz@gmail.com>
+* Matt T. Proud <matt.proud@gmail.com>
+* Tobias Schmidt <ts@soundcloud.com>
+
diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md
new file mode 100644
index 000000000..330788a4e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md
@@ -0,0 +1,109 @@
+## 0.8.0 / 2016-08-17
+* [CHANGE] Registry is doing more consistency checks. This might break
+ existing setups that used to export inconsistent metrics.
+* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow
+ arbitrary grouping.
+* [CHANGE] Removed `SelfCollector`.
+* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods.
+* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`,
+ `extraction`.
+* [CHANGE] Deprecated a number of functions.
+* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer`
+ interfaces.
+* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package
+ `promhttp`) and enabling the creation of other exposition mechanisms.
+* [FEATURE] `MustRegister` is variadic now, allowing registration of many
+ collectors in one call.
+* [FEATURE] Added HTTP API v1 package.
+* [ENHANCEMENT] Numerous documentation improvements.
+* [ENHANCEMENT] Improved metric sorting.
+* [ENHANCEMENT] Inlined fnv64a hashing for improved performance.
+* [ENHANCEMENT] Several test improvements.
+* [BUGFIX] Handle collisions in MetricVec.
+
+## 0.7.0 / 2015-07-27
+* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix.
+* [BUGFIX] Closed gaps in metric consistency check.
+* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling.
+* [ENHANCEMENT] Document the possibility to create "empty" metrics in
+ a metric vector.
+* [ENHANCEMENT] Fix and clarify various doc comments and the README.md.
+* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler.
+* [ENHANCEMENT] Change responseWriterDelegator.written to int64.
+
+## 0.6.0 / 2015-06-01
+* [CHANGE] Rename process_goroutines to go_goroutines.
+* [ENHANCEMENT] Validate label names during YAML decoding.
+* [ENHANCEMENT] Add LabelName regular expression.
+* [BUGFIX] Ensure alignment of struct members for 32-bit systems.
+
+## 0.5.0 / 2015-05-06
+* [BUGFIX] Removed a weakness in the fingerprinting aka signature code.
+ This makes fingerprinting slower and more allocation-heavy, but the
+ weakness was too severe to be tolerated.
+* [CHANGE] As a result of the above, Metric.Fingerprint is now returning
+ a different fingerprint. To keep the same fingerprint, the new method
+ Metric.FastFingerprint was introduced, which will be used by the
+ Prometheus server for storage purposes (implying that a collision
+ detection has to be added, too).
+* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on
+ fingerprinting anymore, removing the possibility of an undetected
+ fingerprint collision.
+* [FEATURE] The Go collector in the exposition library includes garbage
+ collection stats.
+* [FEATURE] The exposition library allows to create constant "throw-away"
+ summaries and histograms.
+* [CHANGE] A number of new reserved labels and prefixes.
+
+## 0.4.0 / 2015-04-08
+* [CHANGE] Return NaN when Summaries have no observations yet.
+* [BUGFIX] Properly handle Summary decay upon Write().
+* [BUGFIX] Fix the documentation link to the consumption library.
+* [FEATURE] Allow the metric family injection hook to merge with existing
+ metric families.
+* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs.
+* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions.
+
+## 0.3.2 / 2015-03-11
+* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is
+ only used by the Prometheus server internally.
+* [CLEANUP] Added licenses of vendored code left out by godep.
+
+## 0.3.1 / 2015-03-04
+* [ENHANCEMENT] Switched fingerprinting functions from own free list to
+ sync.Pool.
+* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests).
+
+## 0.3.0 / 2015-03-03
+* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL
+ PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS
+ VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE.
+* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was
+ arguably broken.)
+* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If
+ client_golang is used as a library, the vendoring will stay out of your way.
+* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made
+ the fingerprinting change above necessary.)
+* [FEATURE] Added new fingerprinting functions SignatureForLabels and
+ SignatureWithoutLabels to be used by the Prometheus server. These functions
+ require fewer allocations than the ones currently used by the server.
+
+## 0.2.0 / 2015-02-23
+* [FEATURE] Introduce new Histagram metric type.
+* [CHANGE] Ignore process collector errors for now (better error handling
+ pending).
+* [CHANGE] Use clear error interface for process pidFn.
+* [BUGFIX] Fix Go download links for several archs and OSes.
+* [ENHANCEMENT] Massively improve Gauge and Counter performance.
+* [ENHANCEMENT] Catch illegal label names for summaries in histograms.
+* [ENHANCEMENT] Reduce allocations during fingerprinting.
+* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if
+ both cgo is available and the build is for an OS with procfs.
+* [CLEANUP] Clean up code style issues.
+* [CLEANUP] Mark slow test as such and exclude them from travis.
+* [CLEANUP] Update protobuf library package name.
+* [CLEANUP] Updated vendoring of beorn7/perks.
+
+## 0.1.0 / 2015-02-02
+* [CLEANUP] Introduced semantic versioning and changelog. From now on,
+ changes will be reported in this file.
diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
new file mode 100644
index 000000000..5705f0fbe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 000000000..dd878a30e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md
new file mode 100644
index 000000000..557eacf5a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/README.md
@@ -0,0 +1,45 @@
+# Prometheus Go client library
+
+[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
+
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://prometheus.io). It has two separate parts, one for
+instrumenting application code, and one for creating clients that talk to the
+Prometheus HTTP API.
+
+## Instrumenting applications
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
+
+The
+[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
+contains the instrumentation library. See the
+[best practices section](http://prometheus.io/docs/practices/naming/) of the
+Prometheus documentation to learn more about instrumenting applications.
+
+The
+[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
+contains simple examples of instrumented code.
+
+## Client for the Prometheus HTTP API
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
+
+The
+[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
+contains the client for the
+[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
+to write Go applications that query time series data from a Prometheus server.
+
+## Where is `model`, `extraction`, and `text`?
+
+The `model` packages has been moved to
+[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
+
+The `extraction` and `text` packages are now contained in
+[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
+
+## Contributing and community
+
+See the [contributing guidelines](CONTRIBUTING.md) and the
+[Community section](http://prometheus.io/community/) of the homepage.
diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION
new file mode 100644
index 000000000..a3df0a695
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/VERSION
@@ -0,0 +1 @@
+0.8.0
diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/api.go
new file mode 100644
index 000000000..3028d741d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/api/prometheus/api.go
@@ -0,0 +1,345 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides bindings to the Prometheus HTTP API:
+// http://prometheus.io/docs/querying/api/
+package prometheus
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+const (
+ statusAPIError = 422
+ apiPrefix = "/api/v1"
+
+ epQuery = "/query"
+ epQueryRange = "/query_range"
+ epLabelValues = "/label/:name/values"
+ epSeries = "/series"
+)
+
+type ErrorType string
+
+const (
+ // The different API error types.
+ ErrBadData ErrorType = "bad_data"
+ ErrTimeout = "timeout"
+ ErrCanceled = "canceled"
+ ErrExec = "execution"
+ ErrBadResponse = "bad_response"
+)
+
+// Error is an error returned by the API.
+type Error struct {
+ Type ErrorType
+ Msg string
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Type, e.Msg)
+}
+
+// CancelableTransport is like net.Transport but provides
+// per-request cancelation functionality.
+type CancelableTransport interface {
+ http.RoundTripper
+ CancelRequest(req *http.Request)
+}
+
+var DefaultTransport CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+// Config defines configuration parameters for a new client.
+type Config struct {
+ // The address of the Prometheus to connect to.
+ Address string
+
+ // Transport is used by the Client to drive HTTP requests. If not
+ // provided, DefaultTransport will be used.
+ Transport CancelableTransport
+}
+
+func (cfg *Config) transport() CancelableTransport {
+ if cfg.Transport == nil {
+ return DefaultTransport
+ }
+ return cfg.Transport
+}
+
+type Client interface {
+ url(ep string, args map[string]string) *url.URL
+ do(context.Context, *http.Request) (*http.Response, []byte, error)
+}
+
+// New returns a new Client.
+//
+// It is safe to use the returned Client from multiple goroutines.
+func New(cfg Config) (Client, error) {
+ u, err := url.Parse(cfg.Address)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = strings.TrimRight(u.Path, "/") + apiPrefix
+
+ return &httpClient{
+ endpoint: u,
+ transport: cfg.transport(),
+ }, nil
+}
+
+type httpClient struct {
+ endpoint *url.URL
+ transport CancelableTransport
+}
+
+func (c *httpClient) url(ep string, args map[string]string) *url.URL {
+ p := path.Join(c.endpoint.Path, ep)
+
+ for arg, val := range args {
+ arg = ":" + arg
+ p = strings.Replace(p, arg, val, -1)
+ }
+
+ u := *c.endpoint
+ u.Path = p
+
+ return &u
+}
+
+func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)
+
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = ioutil.ReadAll(resp.Body)
+ close(done)
+ }()
+
+ select {
+ case <-ctx.Done():
+ err = resp.Body.Close()
+ <-done
+ if err == nil {
+ err = ctx.Err()
+ }
+ case <-done:
+ }
+
+ return resp, body, err
+}
+
+// apiClient wraps a regular client and processes successful API responses.
+// Successful also includes responses that errored at the API level.
+type apiClient struct {
+ Client
+}
+
+type apiResponse struct {
+ Status string `json:"status"`
+ Data json.RawMessage `json:"data"`
+ ErrorType ErrorType `json:"errorType"`
+ Error string `json:"error"`
+}
+
+func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ resp, body, err := c.Client.do(ctx, req)
+ if err != nil {
+ return resp, body, err
+ }
+
+ code := resp.StatusCode
+
+ if code/100 != 2 && code != statusAPIError {
+ return resp, body, &Error{
+ Type: ErrBadResponse,
+ Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
+ }
+ }
+
+ var result apiResponse
+
+ if err = json.Unmarshal(body, &result); err != nil {
+ return resp, body, &Error{
+ Type: ErrBadResponse,
+ Msg: err.Error(),
+ }
+ }
+
+ if (code == statusAPIError) != (result.Status == "error") {
+ err = &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ }
+ }
+
+ if code == statusAPIError && result.Status == "error" {
+ err = &Error{
+ Type: result.ErrorType,
+ Msg: result.Error,
+ }
+ }
+
+ return resp, []byte(result.Data), err
+}
+
+// Range represents a sliced time range.
+type Range struct {
+ // The boundaries of the time range.
+ Start, End time.Time
+ // The maximum time between two slices within the boundaries.
+ Step time.Duration
+}
+
+// queryResult contains result data for a query.
+type queryResult struct {
+ Type model.ValueType `json:"resultType"`
+ Result interface{} `json:"result"`
+
+ // The decoded value.
+ v model.Value
+}
+
+func (qr *queryResult) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Type model.ValueType `json:"resultType"`
+ Result json.RawMessage `json:"result"`
+ }{}
+
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+
+ switch v.Type {
+ case model.ValScalar:
+ var sv model.Scalar
+ err = json.Unmarshal(v.Result, &sv)
+ qr.v = &sv
+
+ case model.ValVector:
+ var vv model.Vector
+ err = json.Unmarshal(v.Result, &vv)
+ qr.v = vv
+
+ case model.ValMatrix:
+ var mv model.Matrix
+ err = json.Unmarshal(v.Result, &mv)
+ qr.v = mv
+
+ default:
+ err = fmt.Errorf("unexpected value type %q", v.Type)
+ }
+ return err
+}
+
+// QueryAPI provides bindings the Prometheus's query API.
+type QueryAPI interface {
+ // Query performs a query for the given time.
+ Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
+ // Query performs a query for the given range.
+ QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
+}
+
+// NewQueryAPI returns a new QueryAPI for the client.
+//
+// It is safe to use the returned QueryAPI from multiple goroutines.
+func NewQueryAPI(c Client) QueryAPI {
+ return &httpQueryAPI{client: apiClient{c}}
+}
+
+type httpQueryAPI struct {
+ client Client
+}
+
+func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
+ u := h.client.url(epQuery, nil)
+ q := u.Query()
+
+ q.Set("query", query)
+ q.Set("time", ts.Format(time.RFC3339Nano))
+
+ u.RawQuery = q.Encode()
+
+ req, _ := http.NewRequest("GET", u.String(), nil)
+
+ _, body, err := h.client.do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ var qres queryResult
+ err = json.Unmarshal(body, &qres)
+
+ return model.Value(qres.v), err
+}
+
+func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
+ u := h.client.url(epQueryRange, nil)
+ q := u.Query()
+
+ var (
+ start = r.Start.Format(time.RFC3339Nano)
+ end = r.End.Format(time.RFC3339Nano)
+ step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
+ )
+
+ q.Set("query", query)
+ q.Set("start", start)
+ q.Set("end", end)
+ q.Set("step", step)
+
+ u.RawQuery = q.Encode()
+
+ req, _ := http.NewRequest("GET", u.String(), nil)
+
+ _, body, err := h.client.do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ var qres queryResult
+ err = json.Unmarshal(body, &qres)
+
+ return model.Value(qres.v), err
+}
diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go b/vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go
new file mode 100644
index 000000000..87d3e408e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/api/prometheus/api_test.go
@@ -0,0 +1,453 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+)
+
+func TestConfig(t *testing.T) {
+ c := Config{}
+ if c.transport() != DefaultTransport {
+ t.Fatalf("expected default transport for nil Transport field")
+ }
+}
+
+func TestClientURL(t *testing.T) {
+ tests := []struct {
+ address string
+ endpoint string
+ args map[string]string
+ expected string
+ }{
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test",
+ expected: "http://localhost:9090/test",
+ },
+ {
+ address: "http://localhost",
+ endpoint: "/test",
+ expected: "http://localhost/test",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "test",
+ expected: "http://localhost:9090/test",
+ },
+ {
+ address: "http://localhost:9090/prefix",
+ endpoint: "/test",
+ expected: "http://localhost:9090/prefix/test",
+ },
+ {
+ address: "https://localhost:9090/",
+ endpoint: "/test/",
+ expected: "https://localhost:9090/test",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param",
+ args: map[string]string{
+ "param": "content",
+ },
+ expected: "http://localhost:9090/test/content",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param/more/:param",
+ args: map[string]string{
+ "param": "content",
+ },
+ expected: "http://localhost:9090/test/content/more/content",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param/more/:foo",
+ args: map[string]string{
+ "param": "content",
+ "foo": "bar",
+ },
+ expected: "http://localhost:9090/test/content/more/bar",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param",
+ args: map[string]string{
+ "nonexistant": "content",
+ },
+ expected: "http://localhost:9090/test/:param",
+ },
+ }
+
+ for _, test := range tests {
+ ep, err := url.Parse(test.address)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hclient := &httpClient{
+ endpoint: ep,
+ transport: DefaultTransport,
+ }
+
+ u := hclient.url(test.endpoint, test.args)
+ if u.String() != test.expected {
+ t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ continue
+ }
+
+ // The apiClient must return exactly the same result as the httpClient.
+ aclient := &apiClient{hclient}
+
+ u = aclient.url(test.endpoint, test.args)
+ if u.String() != test.expected {
+ t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ }
+ }
+}
+
+type testClient struct {
+ *testing.T
+
+ ch chan apiClientTest
+ req *http.Request
+}
+
+type apiClientTest struct {
+ code int
+ response interface{}
+ expected string
+ err *Error
+}
+
+func (c *testClient) url(ep string, args map[string]string) *url.URL {
+ return nil
+}
+
+func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ if ctx == nil {
+ c.Fatalf("context was not passed down")
+ }
+ if req != c.req {
+ c.Fatalf("request was not passed down")
+ }
+
+ test := <-c.ch
+
+ var b []byte
+ var err error
+
+ switch v := test.response.(type) {
+ case string:
+ b = []byte(v)
+ default:
+ b, err = json.Marshal(v)
+ if err != nil {
+ c.Fatal(err)
+ }
+ }
+
+ resp := &http.Response{
+ StatusCode: test.code,
+ }
+
+ return resp, b, nil
+}
+
+func TestAPIClientDo(t *testing.T) {
+ tests := []apiClientTest{
+ {
+ response: &apiResponse{
+ Status: "error",
+ Data: json.RawMessage(`null`),
+ ErrorType: ErrBadData,
+ Error: "failed",
+ },
+ err: &Error{
+ Type: ErrBadData,
+ Msg: "failed",
+ },
+ code: statusAPIError,
+ expected: `null`,
+ },
+ {
+ response: &apiResponse{
+ Status: "error",
+ Data: json.RawMessage(`"test"`),
+ ErrorType: ErrTimeout,
+ Error: "timed out",
+ },
+ err: &Error{
+ Type: ErrTimeout,
+ Msg: "timed out",
+ },
+ code: statusAPIError,
+ expected: `test`,
+ },
+ {
+ response: "bad json",
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "bad response code 400",
+ },
+ code: http.StatusBadRequest,
+ },
+ {
+ response: "bad json",
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "invalid character 'b' looking for beginning of value",
+ },
+ code: statusAPIError,
+ },
+ {
+ response: &apiResponse{
+ Status: "success",
+ Data: json.RawMessage(`"test"`),
+ },
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ },
+ code: statusAPIError,
+ },
+ {
+ response: &apiResponse{
+ Status: "success",
+ Data: json.RawMessage(`"test"`),
+ ErrorType: ErrTimeout,
+ Error: "timed out",
+ },
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ },
+ code: statusAPIError,
+ },
+ {
+ response: &apiResponse{
+ Status: "error",
+ Data: json.RawMessage(`"test"`),
+ ErrorType: ErrTimeout,
+ Error: "timed out",
+ },
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ },
+ code: http.StatusOK,
+ },
+ }
+
+ tc := &testClient{
+ T: t,
+ ch: make(chan apiClientTest, 1),
+ req: &http.Request{},
+ }
+ client := &apiClient{tc}
+
+ for _, test := range tests {
+
+ tc.ch <- test
+
+ _, body, err := client.do(context.Background(), tc.req)
+
+ if test.err != nil {
+ if err == nil {
+ t.Errorf("expected error %q but got none", test.err)
+ continue
+ }
+ if test.err.Error() != err.Error() {
+ t.Errorf("unexpected error: want %q, got %q", test.err, err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("unexpeceted error %s", err)
+ continue
+ }
+
+ want, got := test.expected, string(body)
+ if want != got {
+ t.Errorf("unexpected body: want %q, got %q", want, got)
+ }
+ }
+}
+
+type apiTestClient struct {
+ *testing.T
+ curTest apiTest
+}
+
+type apiTest struct {
+ do func() (interface{}, error)
+ inErr error
+ inRes interface{}
+
+ reqPath string
+ reqParam url.Values
+ reqMethod string
+ res interface{}
+ err error
+}
+
+func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
+ u := &url.URL{
+ Host: "test:9090",
+ Path: apiPrefix + ep,
+ }
+ return u
+}
+
+func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+
+ test := c.curTest
+
+ if req.URL.Path != test.reqPath {
+ c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
+ }
+ if req.Method != test.reqMethod {
+ c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
+ }
+
+ b, err := json.Marshal(test.inRes)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ resp := &http.Response{}
+ if test.inErr != nil {
+ resp.StatusCode = statusAPIError
+ } else {
+ resp.StatusCode = http.StatusOK
+ }
+
+ return resp, b, test.inErr
+}
+
+func TestAPIs(t *testing.T) {
+
+ testTime := time.Now()
+
+ client := &apiTestClient{T: t}
+
+ queryApi := &httpQueryAPI{
+ client: client,
+ }
+
+ doQuery := func(q string, ts time.Time) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryApi.Query(context.Background(), q, ts)
+ }
+ }
+
+ doQueryRange := func(q string, rng Range) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryApi.QueryRange(context.Background(), q, rng)
+ }
+ }
+
+ queryTests := []apiTest{
+ {
+ do: doQuery("2", testTime),
+ inRes: &queryResult{
+ Type: model.ValScalar,
+ Result: &model.Scalar{
+ Value: 2,
+ Timestamp: model.TimeFromUnix(testTime.Unix()),
+ },
+ },
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "time": []string{testTime.Format(time.RFC3339Nano)},
+ },
+ res: &model.Scalar{
+ Value: 2,
+ Timestamp: model.TimeFromUnix(testTime.Unix()),
+ },
+ },
+ {
+ do: doQuery("2", testTime),
+ inErr: fmt.Errorf("some error"),
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "time": []string{testTime.Format(time.RFC3339Nano)},
+ },
+ err: fmt.Errorf("some error"),
+ },
+
+ {
+ do: doQueryRange("2", Range{
+ Start: testTime.Add(-time.Minute),
+ End: testTime,
+ Step: time.Minute,
+ }),
+ inErr: fmt.Errorf("some error"),
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query_range",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
+ "end": []string{testTime.Format(time.RFC3339Nano)},
+ "step": []string{time.Minute.String()},
+ },
+ err: fmt.Errorf("some error"),
+ },
+ }
+
+ var tests []apiTest
+ tests = append(tests, queryTests...)
+
+ for _, test := range tests {
+ client.curTest = test
+
+ res, err := test.do()
+
+ if test.err != nil {
+ if err == nil {
+ t.Errorf("expected error %q but got none", test.err)
+ continue
+ }
+ if err.Error() != test.err.Error() {
+ t.Errorf("unexpected error: want %s, got %s", test.err, err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ continue
+ }
+
+ if !reflect.DeepEqual(res, test.res) {
+ t.Errorf("unexpected result: want %v, got %v", test.res, res)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/examples/random/main.go b/vendor/github.com/prometheus/client_golang/examples/random/main.go
new file mode 100644
index 000000000..563957193
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/examples/random/main.go
@@ -0,0 +1,103 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A simple example exposing fictional RPC latencies with different types of
+// random distributions (uniform, normal, and exponential) as Prometheus
+// metrics.
+package main
+
+import (
+ "flag"
+ "math"
+ "math/rand"
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
+ uniformDomain = flag.Float64("uniform.domain", 200, "The domain for the uniform distribution.")
+ normDomain = flag.Float64("normal.domain", 200, "The domain for the normal distribution.")
+ normMean = flag.Float64("normal.mean", 10, "The mean for the normal distribution.")
+ oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
+)
+
+var (
+ // Create a summary to track fictional interservice RPC latencies for three
+ // distinct services with different latency distributions. These services are
+ // differentiated via a "service" label.
+ rpcDurations = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "rpc_durations_microseconds",
+ Help: "RPC latency distributions.",
+ },
+ []string{"service"},
+ )
+ // The same as above, but now as a histogram, and only for the normal
+ // distribution. The buckets are targeted to the parameters of the
+ // normal distribution, with 20 buckets centered on the mean, each
+ // half-sigma wide.
+ rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "rpc_durations_histogram_microseconds",
+ Help: "RPC latency distributions.",
+ Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
+ })
+)
+
+func init() {
+ // Register the summary and the histogram with Prometheus's default registry.
+ prometheus.MustRegister(rpcDurations)
+ prometheus.MustRegister(rpcDurationsHistogram)
+}
+
+func main() {
+ flag.Parse()
+
+ start := time.Now()
+
+ oscillationFactor := func() float64 {
+ return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod)))
+ }
+
+ // Periodically record some sample latencies for the three services.
+ go func() {
+ for {
+ v := rand.Float64() * *uniformDomain
+ rpcDurations.WithLabelValues("uniform").Observe(v)
+ time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond)
+ }
+ }()
+
+ go func() {
+ for {
+ v := (rand.NormFloat64() * *normDomain) + *normMean
+ rpcDurations.WithLabelValues("normal").Observe(v)
+ rpcDurationsHistogram.Observe(v)
+ time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
+ }
+ }()
+
+ go func() {
+ for {
+ v := rand.ExpFloat64()
+ rpcDurations.WithLabelValues("exponential").Observe(v)
+ time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
+ }
+ }()
+
+ // Expose the registered metrics via HTTP.
+ http.Handle("/metrics", prometheus.Handler())
+ http.ListenAndServe(*addr, nil)
+}
diff --git a/vendor/github.com/prometheus/client_golang/examples/simple/main.go b/vendor/github.com/prometheus/client_golang/examples/simple/main.go
new file mode 100644
index 000000000..19620d2b3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/examples/simple/main.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A minimal example of how to include Prometheus instrumentation.
+package main
+
+import (
+ "flag"
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
+
+func main() {
+ flag.Parse()
+ http.Handle("/metrics", prometheus.Handler())
+ http.ListenAndServe(*addr, nil)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 000000000..3460f0346
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 000000000..44986bff0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go
new file mode 100644
index 000000000..a3d86698b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go
@@ -0,0 +1,183 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "sync"
+ "testing"
+)
+
+func BenchmarkCounterWithLabelValues(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Inc()
+ }
+}
+
+func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ for j := 0; j < b.N/10; j++ {
+ m.WithLabelValues("eins", "zwei", "drei").Inc()
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkCounterWithMappedLabels(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc()
+ }
+}
+
+func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ labels := Labels{"two": "zwei", "one": "eins", "three": "drei"}
+ for i := 0; i < b.N; i++ {
+ m.With(labels).Inc()
+ }
+}
+
+func BenchmarkCounterNoLabels(b *testing.B) {
+ m := NewCounter(CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ })
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Inc()
+ }
+}
+
+func BenchmarkGaugeWithLabelValues(b *testing.B) {
+ m := NewGaugeVec(
+ GaugeOpts{
+ Name: "benchmark_gauge",
+ Help: "A gauge to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Set(3.1415)
+ }
+}
+
+func BenchmarkGaugeNoLabels(b *testing.B) {
+ m := NewGauge(GaugeOpts{
+ Name: "benchmark_gauge",
+ Help: "A gauge to benchmark it.",
+ })
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Set(3.1415)
+ }
+}
+
+func BenchmarkSummaryWithLabelValues(b *testing.B) {
+ m := NewSummaryVec(
+ SummaryOpts{
+ Name: "benchmark_summary",
+ Help: "A summary to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
+ }
+}
+
+func BenchmarkSummaryNoLabels(b *testing.B) {
+ m := NewSummary(SummaryOpts{
+ Name: "benchmark_summary",
+ Help: "A summary to benchmark it.",
+ },
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Observe(3.1415)
+ }
+}
+
+func BenchmarkHistogramWithLabelValues(b *testing.B) {
+ m := NewHistogramVec(
+ HistogramOpts{
+ Name: "benchmark_histogram",
+ Help: "A histogram to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
+ }
+}
+
+func BenchmarkHistogramNoLabels(b *testing.B) {
+ m := NewHistogram(HistogramOpts{
+ Name: "benchmark_histogram",
+ Help: "A histogram to benchmark it.",
+ },
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Observe(3.1415)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 000000000..623d3d83f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by
+ // Describe. Returned metrics that share the same descriptor must differ
+ // in their variable label values. This method may be called
+ // concurrently and must therefore be implemented in a concurrency safe
+ // way. Blocking occurs at the expense of total performance of rendering
+ // all registered metrics. Ideally, Collector implementations support
+ // concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 000000000..ee37949ad
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,172 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ //
+ // Deprecated: Use NewConstMetric to create a counter for an external
+ // value. A Counter should never be set.
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ *MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go
new file mode 100644
index 000000000..67391a23a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestCounterAdd(t *testing.T) {
+ counter := NewCounter(CounterOpts{
+ Name: "test",
+ Help: "test help",
+ ConstLabels: Labels{"a": "1", "b": "2"},
+ }).(*counter)
+ counter.Inc()
+ if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+ counter.Add(42)
+ if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+
+ if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
+ t.Errorf("Expected error %q, got %q.", expected, got)
+ }
+
+ m := &dto.Metric{}
+ counter.Write(m)
+
+ if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func decreaseCounter(c *counter) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(error)
+ }
+ }()
+ c.Add(-1)
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 000000000..77f4b30e8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,205 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+	// err is an error that occurred during construction. It is reported on
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 000000000..b15a2d3b9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,181 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow to
+// expose the registered metrics via HTTP (package promhttp) or push them to a
+// Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
+// HistogramOpts, or UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. An own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might
+// cause. As suggested by the name, MustRegister panics if an error occurs. With
+// the Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data
+// model. Inconsistencies are ideally detected at registration time, not at
+// collect time. The former will usually be detected at start-up time of a
+// program, while the latter will only happen at scrape time, possibly not even
+// on the first scrape if the inconsistency only becomes relevant later. That is
+// the main reason why a Collector and a Metric have to describe themselves to
+// the registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in
+// the same way on a custom registry as the global functions Register and
+// Unregister on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries
+// with special properties, see NewPedanticRegistry. You can avoid global state,
+// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp
+// sub-package. (The top-level functions in the prometheus package are
+// deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Function for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added. Sending metrics to
+// Graphite would be an example that will soon be implemented.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
new file mode 100644
index 000000000..260c1b52d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
@@ -0,0 +1,118 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// ClusterManager is an example for a system that might have been built without
+// Prometheus in mind. It models a central manager of jobs running in a
+// cluster. To turn it into something that collects Prometheus metrics, we
+// simply add the two methods required for the Collector interface.
+//
+// An additional challenge is that multiple instances of the ClusterManager are
+// run within the same binary, each in charge of a different zone. We need to
+// make use of ConstLabels to be able to register each ClusterManager instance
+// with Prometheus.
+type ClusterManager struct {
+	// Zone is the cluster zone this manager instance is in charge of. It is
+	// used as a ConstLabel so several instances can share one registry.
+	Zone string
+	// OOMCountDesc describes the per-host OOM crash counter emitted by Collect.
+	OOMCountDesc *prometheus.Desc
+	// RAMUsageDesc describes the per-host RAM usage gauge emitted by Collect.
+	RAMUsageDesc *prometheus.Desc
+	// ... many more fields
+}
+
+// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
+// real cluster manager would have to do. Since it may actually be really
+// expensive, it must only be called once per collection. This implementation,
+// obviously, only returns some made-up data.
+func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
+	oomCountByHost map[string]int, ramUsageByHost map[string]float64,
+) {
+	// Hand back fixed fake data; a real cluster manager would survey the
+	// cluster here, which is why callers must invoke this only once per
+	// collection.
+	return map[string]int{
+			"foo.example.org": 42,
+			"bar.example.org": 2001,
+		}, map[string]float64{
+			"foo.example.org": 6.023e23,
+			"bar.example.org": 3.14,
+		}
+}
+
+// Describe simply sends the two Descs in the struct to the channel.
+func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
+	// Forward both metric descriptors to the registry, OOM count first,
+	// exactly as a hand-written pair of sends would.
+	for _, desc := range []*prometheus.Desc{c.OOMCountDesc, c.RAMUsageDesc} {
+		ch <- desc
+	}
+}
+
+// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
+// creates constant metrics for each host on the fly based on the returned data.
+//
+// Note that Collect could be called concurrently, so we depend on
+// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
+func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
+	// Gather the (expensive) state exactly once per scrape, then emit one
+	// constant metric per host for each of the two metric families.
+	oomCounts, ramUsage := c.ReallyExpensiveAssessmentOfTheSystemState()
+	for host, count := range oomCounts {
+		ch <- prometheus.MustNewConstMetric(c.OOMCountDesc, prometheus.CounterValue, float64(count), host)
+	}
+	for host, usage := range ramUsage {
+		ch <- prometheus.MustNewConstMetric(c.RAMUsageDesc, prometheus.GaugeValue, usage, host)
+	}
+}
+
+// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
+// that the zone is set as a ConstLabel. (It's different in each instance of the
+// ClusterManager, but constant over the lifetime of an instance.) Then there is
+// a variable label "host", since we want to partition the collected metrics by
+// host. Since all Descs created in this way are consistent across instances,
+// with a guaranteed distinction by the "zone" label, we can register different
+// ClusterManager instances with the same registry.
+func NewClusterManager(zone string) *ClusterManager {
+	// The zone becomes a ConstLabel shared by both descriptors, so
+	// differently-zoned instances stay distinguishable in one registry.
+	constLabels := prometheus.Labels{"zone": zone}
+	cm := &ClusterManager{Zone: zone}
+	cm.OOMCountDesc = prometheus.NewDesc(
+		"clustermanager_oom_crashes_total",
+		"Number of OOM crashes.",
+		[]string{"host"}, // metrics are partitioned per host via this variable label
+		constLabels,
+	)
+	cm.RAMUsageDesc = prometheus.NewDesc(
+		"clustermanager_ram_usage_bytes",
+		"RAM usage as reported to the cluster manager.",
+		[]string{"host"},
+		constLabels,
+	)
+	return cm
+}
+
+func ExampleCollector() {
+	// One manager per zone; both collectors can coexist in one registry
+	// because their Descs differ in the "zone" ConstLabel.
+	workerDB := NewClusterManager("db")
+	workerCA := NewClusterManager("ca")
+
+	// A pedantic registry is strict about metric consistency, which is
+	// handy while trying out custom Collector implementations.
+	reg := prometheus.NewPedanticRegistry()
+	reg.MustRegister(workerDB, workerCA)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go
new file mode 100644
index 000000000..f87f21a8f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go
@@ -0,0 +1,751 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "net/http"
+ "runtime"
+ "sort"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func ExampleGauge() {
+	// A gauge for the current depth of the blob-storage work queue.
+	queued := prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "our_company",
+		Subsystem: "blob_storage",
+		Name:      "ops_queued",
+		Help:      "Number of blob storage operations waiting to be processed.",
+	})
+	prometheus.MustRegister(queued)
+
+	// The goroutine managing incoming requests enqueues ten operations...
+	queued.Add(10)
+	// ...and worker goroutines each take one off the queue.
+	queued.Dec()
+	queued.Dec()
+}
+
+func ExampleGaugeVec() {
+	// Queue-depth gauge partitioned by requesting user and operation type.
+	queued := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: "our_company",
+			Subsystem: "blob_storage",
+			Name:      "ops_queued",
+			Help:      "Number of blob storage operations waiting to be processed, partitioned by user and type.",
+		},
+		[]string{"user", "type"},
+	)
+	prometheus.MustRegister(queued)
+
+	// WithLabelValues is compact but positional: values must appear in the
+	// same order as the label names above.
+	queued.WithLabelValues("bob", "put").Add(4)
+	// With takes a label map instead — more verbose, but order-insensitive.
+	queued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc()
+}
+
+func ExampleGaugeFunc() {
+	// A GaugeFunc samples its value at scrape time; here it reports the
+	// live goroutine count. It is a gauge (not a counter) because the
+	// count can go up and down.
+	gf := prometheus.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Subsystem: "runtime",
+			Name:      "goroutines_count",
+			Help:      "Number of goroutines that currently exist.",
+		},
+		func() float64 { return float64(runtime.NumGoroutine()) },
+	)
+	if err := prometheus.Register(gf); err == nil {
+		fmt.Println("GaugeFunc 'goroutines_count' registered.")
+	}
+
+	// Output:
+	// GaugeFunc 'goroutines_count' registered.
+}
+
+func ExampleCounter() {
+	// Registration fails because the Opts carry no Help string — the
+	// registry rejects descriptors with empty help.
+	pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "repository_pushes", // Note: No help string...
+	})
+	err := prometheus.Register(pushCounter) // ... so this will return an error.
+	if err != nil {
+		fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
+		return
+	}
+
+	// Try it once more, this time with a help string.
+	pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "repository_pushes",
+		Help: "Number of pushes to external repository.",
+	})
+	err = prometheus.Register(pushCounter)
+	if err != nil {
+		fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
+		return
+	}
+
+	pushComplete := make(chan struct{})
+	// TODO: Start a goroutine that performs repository pushes and reports
+	// each completion via the channel.
+	// Idiomatic form: "for range" — the blank assignment "for _ = range"
+	// is redundant since Go 1.4 and is what gofmt -s rewrites.
+	for range pushComplete {
+		pushCounter.Inc()
+	}
+	// Output:
+	// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
+}
+
+func ExampleCounterVec() {
+	// Request counter partitioned by HTTP status code and method.
+	requests := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "http_requests_total",
+			Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
+		},
+		[]string{"code", "method"},
+	)
+	prometheus.MustRegister(requests)
+
+	requests.WithLabelValues("404", "POST").Add(42)
+
+	// For a label set that is hit very frequently, look the child up once
+	// and keep the handle. But beware of deletion: after a Delete, updates
+	// through a kept handle go unseen (even if a metric with the same
+	// label set is re-created later).
+	okGets := requests.WithLabelValues("200", "GET")
+	for n := 0; n < 1000000; n++ {
+		okGets.Inc()
+	}
+	// Remove the child via positional label values...
+	requests.DeleteLabelValues("200", "GET")
+	// ...or, equivalently, via the more verbose (order-insensitive) Labels map.
+	requests.Delete(prometheus.Labels{"method": "GET", "code": "200"})
+}
+
+// ExampleInstrumentHandler shows how wrapping HTTP handlers exports request
+// count, request/response sizes, and latency automatically.
+//
+// NOTE(review): the package doc in this same commit states that the
+// top-level HTTP helpers of the prometheus package are deprecated in favor
+// of the promhttp sub-package — confirm before copying this pattern.
+func ExampleInstrumentHandler() {
+	// Handle the "/doc" endpoint with the standard http.FileServer handler.
+	// By wrapping the handler with InstrumentHandler, request count,
+	// request and response sizes, and request latency are automatically
+	// exported to Prometheus, partitioned by HTTP status code and method
+	// and by the handler name (here "fileserver").
+	http.Handle("/doc", prometheus.InstrumentHandler(
+		"fileserver", http.FileServer(http.Dir("/usr/share/doc")),
+	))
+	// The Prometheus handler still has to be registered to handle the
+	// "/metrics" endpoint. The handler returned by prometheus.Handler() is
+	// already instrumented - with "prometheus" as the handler name. In this
+	// example, we want the handler name to be "metrics", so we instrument
+	// the uninstrumented Prometheus handler ourselves.
+	http.Handle("/metrics", prometheus.InstrumentHandler(
+		"metrics", prometheus.UninstrumentedHandler(),
+	))
+}
+
+func ExampleLabelPairSorter() {
+	// Two label pairs deliberately out of lexicographic order. The inner
+	// "&dto.LabelPair" is redundant inside a []*dto.LabelPair literal and
+	// is elided (gofmt -s composite-literal simplification).
+	labelPairs := []*dto.LabelPair{
+		{Name: proto.String("status"), Value: proto.String("404")},
+		{Name: proto.String("method"), Value: proto.String("get")},
+	}
+
+	// LabelPairSorter sorts the pairs by label name.
+	sort.Sort(prometheus.LabelPairSorter(labelPairs))
+
+	fmt.Println(labelPairs)
+	// Output:
+	// [name:"method" value:"get" name:"status" value:"404" ]
+}
+
+// ExampleRegister walks through the registration error cases: duplicate
+// fully-qualified names with differing label dimensions, unregistering,
+// label-cardinality mismatches, and the ConstLabels workaround.
+func ExampleRegister() {
+	// Imagine you have a worker pool and want to count the tasks completed.
+	taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
+		Subsystem: "worker_pool",
+		Name:      "completed_tasks_total",
+		Help:      "Total number of tasks completed.",
+	})
+	// This will register fine.
+	if err := prometheus.Register(taskCounter); err != nil {
+		fmt.Println(err)
+	} else {
+		fmt.Println("taskCounter registered.")
+	}
+	// Don't forget to tell the HTTP server about the Prometheus handler.
+	// (In a real program, you still need to start the HTTP server...)
+	http.Handle("/metrics", prometheus.Handler())
+
+	// Now you can start workers and give every one of them a pointer to
+	// taskCounter and let it increment it whenever it completes a task.
+	taskCounter.Inc() // This has to happen somewhere in the worker code.
+
+	// But wait, you want to see how individual workers perform. So you need
+	// a vector of counters, with one element for each worker.
+	taskCounterVec := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Subsystem: "worker_pool",
+			Name:      "completed_tasks_total",
+			Help:      "Total number of tasks completed.",
+		},
+		[]string{"worker_id"},
+	)
+
+	// Registering will fail because we already have a metric of that name.
+	if err := prometheus.Register(taskCounterVec); err != nil {
+		fmt.Println("taskCounterVec not registered:", err)
+	} else {
+		fmt.Println("taskCounterVec registered.")
+	}
+
+	// To fix, first unregister the old taskCounter.
+	if prometheus.Unregister(taskCounter) {
+		fmt.Println("taskCounter unregistered.")
+	}
+
+	// Try registering taskCounterVec again.
+	if err := prometheus.Register(taskCounterVec); err != nil {
+		fmt.Println("taskCounterVec not registered:", err)
+	} else {
+		fmt.Println("taskCounterVec registered.")
+	}
+	// Bummer! Still doesn't work.
+
+	// Prometheus will not allow you to ever export metrics with
+	// inconsistent help strings or label names. After unregistering, the
+	// unregistered metrics will cease to show up in the /metrics HTTP
+	// response, but the registry still remembers that those metrics had
+	// been exported before. For this example, we will now choose a
+	// different name. (In a real program, you would obviously not export
+	// the obsolete metric in the first place.)
+	taskCounterVec = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Subsystem: "worker_pool",
+			Name:      "completed_tasks_by_id",
+			Help:      "Total number of tasks completed.",
+		},
+		[]string{"worker_id"},
+	)
+	if err := prometheus.Register(taskCounterVec); err != nil {
+		fmt.Println("taskCounterVec not registered:", err)
+	} else {
+		fmt.Println("taskCounterVec registered.")
+	}
+	// Finally it worked!
+
+	// The workers have to tell taskCounterVec their id to increment the
+	// right element in the metric vector.
+	taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.
+
+	// Each worker could also keep a reference to their own counter element
+	// around. Pick the counter at initialization time of the worker.
+	myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
+	myCounter.Inc()                                   // Somewhere in the code of that worker.
+
+	// Note that something like WithLabelValues("42", "spurious arg") would
+	// panic (because you have provided too many label values). If you want
+	// to get an error instead, use GetMetricWithLabelValues(...) instead.
+	notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
+	if err != nil {
+		fmt.Println("Worker initialization failed:", err)
+	}
+	if notMyCounter == nil {
+		fmt.Println("notMyCounter is nil.")
+	}
+
+	// A different (and somewhat tricky) approach is to use
+	// ConstLabels. ConstLabels are pairs of label names and label values
+	// that never change. You might ask what those labels are good for (and
+	// rightfully so - if they never change, they could as well be part of
+	// the metric name). There are essentially two use-cases: The first is
+	// if labels are constant throughout the lifetime of a binary execution,
+	// but they vary over time or between different instances of a running
+	// binary. The second is what we have here: Each worker creates and
+	// registers an own Counter instance where the only difference is in the
+	// value of the ConstLabels. Those Counters can all be registered
+	// because the different ConstLabel values guarantee that each worker
+	// will increment a different Counter metric.
+	counterOpts := prometheus.CounterOpts{
+		Subsystem:   "worker_pool",
+		Name:        "completed_tasks",
+		Help:        "Total number of tasks completed.",
+		ConstLabels: prometheus.Labels{"worker_id": "42"},
+	}
+	taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
+	// NOTE(review): the message below reads "taskCounterVForWorker42"
+	// (apparent typo); this branch is not exercised in the expected output.
+	if err := prometheus.Register(taskCounterForWorker42); err != nil {
+		fmt.Println("taskCounterVForWorker42 not registered:", err)
+	} else {
+		fmt.Println("taskCounterForWorker42 registered.")
+	}
+	// Obviously, in real code, taskCounterForWorker42 would be a member
+	// variable of a worker struct, and the "42" would be retrieved with a
+	// GetId() method or something. The Counter would be created and
+	// registered in the initialization code of the worker.
+
+	// For the creation of the next Counter, we can recycle
+	// counterOpts. Just change the ConstLabels.
+	counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
+	taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
+	if err := prometheus.Register(taskCounterForWorker2001); err != nil {
+		fmt.Println("taskCounterVForWorker2001 not registered:", err)
+	} else {
+		fmt.Println("taskCounterForWorker2001 registered.")
+	}
+
+	taskCounterForWorker2001.Inc()
+	taskCounterForWorker42.Inc()
+	taskCounterForWorker2001.Inc()
+
+	// Yet another approach would be to turn the workers themselves into
+	// Collectors and register them. See the Collector example for details.
+
+	// Output:
+	// taskCounter registered.
+	// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
+	// taskCounter unregistered.
+	// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
+	// taskCounterVec registered.
+	// Worker initialization failed: inconsistent label cardinality
+	// notMyCounter is nil.
+	// taskCounterForWorker42 registered.
+	// taskCounterForWorker2001 registered.
+}
+
+func ExampleSummary() {
+	temps := prometheus.NewSummary(prometheus.SummaryOpts{
+		Name: "pond_temperature_celsius",
+		Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+	})
+
+	// Simulate some observations. The deterministic sine-based values make
+	// the sample_sum and quantiles in the expected output reproducible.
+	for i := 0; i < 1000; i++ {
+		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+	}
+
+	// Just for demonstration, let's check the state of the summary by
+	// (ab)using its Write method (which is usually only used by Prometheus
+	// internally).
+	metric := &dto.Metric{}
+	temps.Write(metric)
+	fmt.Println(proto.MarshalTextString(metric))
+
+	// Output:
+	// summary: <
+	//   sample_count: 1000
+	//   sample_sum: 29969.50000000001
+	//   quantile: <
+	//     quantile: 0.5
+	//     value: 31.1
+	//   >
+	//   quantile: <
+	//     quantile: 0.9
+	//     value: 41.3
+	//   >
+	//   quantile: <
+	//     quantile: 0.99
+	//     value: 41.9
+	//   >
+	// >
+}
+
+func ExampleSummaryVec() {
+	temps := prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name: "pond_temperature_celsius",
+			Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+		},
+		[]string{"species"},
+	)
+
+	// Simulate some observations.
+	for i := 0; i < 1000; i++ {
+		temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+		temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
+	}
+
+	// Create a Summary without any observations. As the expected output
+	// below shows, its count and sum are zero and all quantiles are NaN.
+	temps.WithLabelValues("leiopelma-hochstetteri")
+
+	// Just for demonstration, let's check the state of the summary vector
+	// by registering it with a custom registry and then let it collect the
+	// metrics.
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(temps)
+
+	metricFamilies, err := reg.Gather()
+	if err != nil || len(metricFamilies) != 1 {
+		panic("unexpected behavior of custom test registry")
+	}
+	fmt.Println(proto.MarshalTextString(metricFamilies[0]))
+
+	// Output:
+	// name: "pond_temperature_celsius"
+	// help: "The temperature of the frog pond."
+	// type: SUMMARY
+	// metric: <
+	//   label: <
+	//     name: "species"
+	//     value: "leiopelma-hochstetteri"
+	//   >
+	//   summary: <
+	//     sample_count: 0
+	//     sample_sum: 0
+	//     quantile: <
+	//       quantile: 0.5
+	//       value: nan
+	//     >
+	//     quantile: <
+	//       quantile: 0.9
+	//       value: nan
+	//     >
+	//     quantile: <
+	//       quantile: 0.99
+	//       value: nan
+	//     >
+	//   >
+	// >
+	// metric: <
+	//   label: <
+	//     name: "species"
+	//     value: "lithobates-catesbeianus"
+	//   >
+	//   summary: <
+	//     sample_count: 1000
+	//     sample_sum: 31956.100000000017
+	//     quantile: <
+	//       quantile: 0.5
+	//       value: 32.4
+	//     >
+	//     quantile: <
+	//       quantile: 0.9
+	//       value: 41.4
+	//     >
+	//     quantile: <
+	//       quantile: 0.99
+	//       value: 41.9
+	//     >
+	//   >
+	// >
+	// metric: <
+	//   label: <
+	//     name: "species"
+	//     value: "litoria-caerulea"
+	//   >
+	//   summary: <
+	//     sample_count: 1000
+	//     sample_sum: 29969.50000000001
+	//     quantile: <
+	//       quantile: 0.5
+	//       value: 31.1
+	//     >
+	//     quantile: <
+	//       quantile: 0.9
+	//       value: 41.3
+	//     >
+	//     quantile: <
+	//       quantile: 0.99
+	//       value: 41.9
+	//     >
+	//   >
+	// >
+}
+
+func ExampleNewConstSummary() {
+	desc := prometheus.NewDesc(
+		"http_request_duration_seconds",
+		"A summary of the HTTP request durations.",
+		[]string{"code", "method"},
+		prometheus.Labels{"owner": "example"},
+	)
+
+	// Create a constant summary from values we got from a 3rd party telemetry system.
+	// Per the expected output, 4711 is the sample count and 403.34 the
+	// sample sum; the map supplies the pre-computed quantile values.
+	s := prometheus.MustNewConstSummary(
+		desc,
+		4711, 403.34,
+		map[float64]float64{0.5: 42.3, 0.9: 323.3},
+		"200", "get",
+	)
+
+	// Just for demonstration, let's check the state of the summary by
+	// (ab)using its Write method (which is usually only used by Prometheus
+	// internally).
+	metric := &dto.Metric{}
+	s.Write(metric)
+	fmt.Println(proto.MarshalTextString(metric))
+
+	// Output:
+	// label: <
+	//   name: "code"
+	//   value: "200"
+	// >
+	// label: <
+	//   name: "method"
+	//   value: "get"
+	// >
+	// label: <
+	//   name: "owner"
+	//   value: "example"
+	// >
+	// summary: <
+	//   sample_count: 4711
+	//   sample_sum: 403.34
+	//   quantile: <
+	//     quantile: 0.5
+	//     value: 42.3
+	//   >
+	//   quantile: <
+	//     quantile: 0.9
+	//     value: 323.3
+	//   >
+	// >
+}
+
+func ExampleHistogram() {
+	temps := prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name:    "pond_temperature_celsius",
+		Help:    "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+		Buckets: prometheus.LinearBuckets(20, 5, 5),  // 5 buckets, each 5 centigrade wide.
+	})
+
+	// Simulate some observations. Note in the expected output that the
+	// bucket counts are cumulative (each includes all lower buckets).
+	for i := 0; i < 1000; i++ {
+		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+	}
+
+	// Just for demonstration, let's check the state of the histogram by
+	// (ab)using its Write method (which is usually only used by Prometheus
+	// internally).
+	metric := &dto.Metric{}
+	temps.Write(metric)
+	fmt.Println(proto.MarshalTextString(metric))
+
+	// Output:
+	// histogram: <
+	//   sample_count: 1000
+	//   sample_sum: 29969.50000000001
+	//   bucket: <
+	//     cumulative_count: 192
+	//     upper_bound: 20
+	//   >
+	//   bucket: <
+	//     cumulative_count: 366
+	//     upper_bound: 25
+	//   >
+	//   bucket: <
+	//     cumulative_count: 501
+	//     upper_bound: 30
+	//   >
+	//   bucket: <
+	//     cumulative_count: 638
+	//     upper_bound: 35
+	//   >
+	//   bucket: <
+	//     cumulative_count: 816
+	//     upper_bound: 40
+	//   >
+	// >
+}
+
+func ExampleNewConstHistogram() {
+	desc := prometheus.NewDesc(
+		"http_request_duration_seconds",
+		"A histogram of the HTTP request durations.",
+		[]string{"code", "method"},
+		prometheus.Labels{"owner": "example"},
+	)
+
+	// Create a constant histogram from values we got from a 3rd party telemetry system.
+	// Per the expected output, 4711 is the sample count, 403.34 the sample
+	// sum, and the map supplies cumulative counts keyed by upper bound.
+	h := prometheus.MustNewConstHistogram(
+		desc,
+		4711, 403.34,
+		map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
+		"200", "get",
+	)
+
+	// Just for demonstration, let's check the state of the histogram by
+	// (ab)using its Write method (which is usually only used by Prometheus
+	// internally).
+	metric := &dto.Metric{}
+	h.Write(metric)
+	fmt.Println(proto.MarshalTextString(metric))
+
+	// Output:
+	// label: <
+	//   name: "code"
+	//   value: "200"
+	// >
+	// label: <
+	//   name: "method"
+	//   value: "get"
+	// >
+	// label: <
+	//   name: "owner"
+	//   value: "example"
+	// >
+	// histogram: <
+	//   sample_count: 4711
+	//   sample_sum: 403.34
+	//   bucket: <
+	//     cumulative_count: 121
+	//     upper_bound: 25
+	//   >
+	//   bucket: <
+	//     cumulative_count: 2403
+	//     upper_bound: 50
+	//   >
+	//   bucket: <
+	//     cumulative_count: 3221
+	//     upper_bound: 100
+	//   >
+	//   bucket: <
+	//     cumulative_count: 4233
+	//     upper_bound: 200
+	//   >
+	// >
+}
+
+func ExampleAlreadyRegisteredError() {
+	reqCounter := prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "requests_total",
+		Help: "The total number of requests served.",
+	})
+	err := prometheus.Register(reqCounter)
+	if err == nil {
+		return // Registered fine; nothing more to do.
+	}
+	are, ok := err.(prometheus.AlreadyRegisteredError)
+	if !ok {
+		// Something else went wrong!
+		panic(err)
+	}
+	// A counter for that metric has been registered before. Use the old
+	// counter from now on.
+	reqCounter = are.ExistingCollector.(prometheus.Counter)
+}
+
+// ExampleGatherers shows how metrics from a registry and from a parsed text
+// source are merged — and how duplicate/inconsistent metrics are reported
+// as errors while the remaining metrics are still returned.
+func ExampleGatherers() {
+	reg := prometheus.NewRegistry()
+	temp := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "temperature_kelvin",
+			Help: "Temperature in Kelvin.",
+		},
+		[]string{"location"},
+	)
+	reg.MustRegister(temp)
+	temp.WithLabelValues("outside").Set(273.14)
+	temp.WithLabelValues("inside").Set(298.44)
+
+	var parser expfmt.TextParser
+
+	text := `
+# TYPE humidity_percent gauge
+# HELP humidity_percent Humidity in %.
+humidity_percent{location="outside"} 45.4
+humidity_percent{location="inside"} 33.2
+# TYPE temperature_kelvin gauge
+# HELP temperature_kelvin Temperature in Kelvin.
+temperature_kelvin{location="somewhere else"} 4.5
+`
+
+	// parseText adapts the text parser to the Gatherer contract: it reads
+	// the (captured) text variable and returns the parsed families.
+	parseText := func() ([]*dto.MetricFamily, error) {
+		parsed, err := parser.TextToMetricFamilies(strings.NewReader(text))
+		if err != nil {
+			return nil, err
+		}
+		var result []*dto.MetricFamily
+		for _, mf := range parsed {
+			result = append(result, mf)
+		}
+		return result, nil
+	}
+
+	gatherers := prometheus.Gatherers{
+		reg,
+		prometheus.GathererFunc(parseText),
+	}
+
+	gathering, err := gatherers.Gather()
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	out := &bytes.Buffer{}
+	for _, mf := range gathering {
+		if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
+			panic(err)
+		}
+	}
+	fmt.Print(out.String())
+	fmt.Println("----------")
+
+	// Note how the temperature_kelvin metric family has been merged from
+	// different sources. Now try
+	text = `
+# TYPE humidity_percent gauge
+# HELP humidity_percent Humidity in %.
+humidity_percent{location="outside"} 45.4
+humidity_percent{location="inside"} 33.2
+# TYPE temperature_kelvin gauge
+# HELP temperature_kelvin Temperature in Kelvin.
+# Duplicate metric:
+temperature_kelvin{location="outside"} 265.3
+ # Wrong labels:
+temperature_kelvin 4.5
+`
+
+	gathering, err = gatherers.Gather()
+	if err != nil {
+		fmt.Println(err)
+	}
+	// Note that still as many metrics as possible are returned:
+	out.Reset()
+	for _, mf := range gathering {
+		if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
+			panic(err)
+		}
+	}
+	fmt.Print(out.String())
+
+	// Output:
+	// # HELP humidity_percent Humidity in %.
+	// # TYPE humidity_percent gauge
+	// humidity_percent{location="inside"} 33.2
+	// humidity_percent{location="outside"} 45.4
+	// # HELP temperature_kelvin Temperature in Kelvin.
+	// # TYPE temperature_kelvin gauge
+	// temperature_kelvin{location="inside"} 298.44
+	// temperature_kelvin{location="outside"} 273.14
+	// temperature_kelvin{location="somewhere else"} 4.5
+	// ----------
+	// 2 error(s) occurred:
+	// * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
+	// * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
+	// # HELP humidity_percent Humidity in %.
+	// # TYPE humidity_percent gauge
+	// humidity_percent{location="inside"} 33.2
+	// humidity_percent{location="outside"} 45.4
+	// # HELP temperature_kelvin Temperature in Kelvin.
+	// # TYPE temperature_kelvin gauge
+	// temperature_kelvin{location="inside"} 298.44
+	// temperature_kelvin{location="outside"} 273.14
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 000000000..18a99d5fa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+// expvarCollector is the Collector returned by NewExpvarCollector. See the
+// documentation of NewExpvarCollector for the semantics of the exports map.
+type expvarCollector struct {
+	// exports maps an expvar key to the Desc describing how that expvar
+	// value is exported as a Prometheus metric.
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	// The collector keeps the caller's map as-is; the expvar values are
+	// looked up lazily in Collect.
+	return &expvarCollector{exports: exports}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	// Send every configured Desc; the expvar values themselves are only
+	// consulted at collection time.
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	// For each configured export: fetch the expvar, decode its JSON string
+	// form, then walk the decoded value, descending one map level per
+	// variable label, and emit one untyped const metric per leaf.
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			// The expvar key is not (currently) exported; skip silently.
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		// processValue recurses through nested maps; i is the index of the
+		// label currently being filled in.
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				// All labels filled. Copy the label slice because the
+				// recursion reuses it for sibling branches.
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					// Booleans map to 1 (true) / 0 (false).
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					// Leaf is neither a number nor a bool: silently
+					// ignored, as documented on NewExpvarCollector.
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				// A label remains but the value is not a nested map; ignore.
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
new file mode 100644
index 000000000..5d3128fae
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "expvar"
+ "fmt"
+ "sort"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func ExampleExpvarCollector() {
+ expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
+ "memstats": prometheus.NewDesc(
+ "expvar_memstats",
+ "All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
+ []string{"type"}, nil,
+ ),
+ "lone-int": prometheus.NewDesc(
+ "expvar_lone_int",
+ "Just an expvar int as an example.",
+ nil, nil,
+ ),
+ "http-request-map": prometheus.NewDesc(
+ "expvar_http_request_total",
+ "How many http requests processed, partitioned by status code and http method.",
+ []string{"code", "method"}, nil,
+ ),
+ })
+ prometheus.MustRegister(expvarCollector)
+
+ // The Prometheus part is done here. But to show that this example is
+ // doing anything, we have to manually export something via expvar. In
+ // real-life use-cases, some library would already have exported via
+ // expvar what we want to re-export as Prometheus metrics.
+ expvar.NewInt("lone-int").Set(42)
+ expvarMap := expvar.NewMap("http-request-map")
+ var (
+ expvarMap1, expvarMap2 expvar.Map
+ expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
+ )
+ expvarMap1.Init()
+ expvarMap2.Init()
+ expvarInt11.Set(3)
+ expvarInt12.Set(13)
+ expvarInt21.Set(11)
+ expvarInt22.Set(212)
+ expvarMap1.Set("POST", &expvarInt11)
+ expvarMap1.Set("GET", &expvarInt12)
+ expvarMap2.Set("POST", &expvarInt21)
+ expvarMap2.Set("GET", &expvarInt22)
+ expvarMap.Set("404", &expvarMap1)
+ expvarMap.Set("200", &expvarMap2)
+ // Results in the following expvar map:
+ // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
+
+ // Let's see what the scrape would yield, but exclude the memstats metrics.
+ metricStrings := []string{}
+ metric := dto.Metric{}
+ metricChan := make(chan prometheus.Metric)
+ go func() {
+ expvarCollector.Collect(metricChan)
+ close(metricChan)
+ }()
+ for m := range metricChan {
+ if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
+ metric.Reset()
+ m.Write(&metric)
+ metricStrings = append(metricStrings, metric.String())
+ }
+ }
+ sort.Strings(metricStrings)
+ for _, s := range metricStrings {
+ fmt.Println(strings.TrimRight(s, " "))
+ }
+ // Output:
+ // label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
+ // label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
+ // label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
+ // label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
+ // untyped:<value:42 >
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 000000000..e3b67df8a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializies a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 000000000..8b70e5141
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,140 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go
new file mode 100644
index 000000000..48cab4636
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "math/rand"
+ "sync"
+ "testing"
+ "testing/quick"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func listenGaugeStream(vals, result chan float64, done chan struct{}) {
+ var sum float64
+outer:
+ for {
+ select {
+ case <-done:
+ close(vals)
+ for v := range vals {
+ sum += v
+ }
+ break outer
+ case v := <-vals:
+ sum += v
+ }
+ }
+ result <- sum
+ close(result)
+}
+
+func TestGaugeConcurrency(t *testing.T) {
+ it := func(n uint32) bool {
+ mutations := int(n % 10000)
+ concLevel := int(n%15 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sStream := make(chan float64, mutations*concLevel)
+ result := make(chan float64)
+ done := make(chan struct{})
+
+ go listenGaugeStream(sStream, result, done)
+ go func() {
+ end.Wait()
+ close(done)
+ }()
+
+ gge := NewGauge(GaugeOpts{
+ Name: "test_gauge",
+ Help: "no help can be found here",
+ })
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ for j := 0; j < mutations; j++ {
+ vals[j] = rand.Float64() - 0.5
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for _, v := range vals {
+ sStream <- v
+ gge.Add(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ start.Done()
+
+ if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
+ t.Fatalf("expected approx. %f, got %f", expected, got)
+ return false
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGaugeVecConcurrency(t *testing.T) {
+ it := func(n uint32) bool {
+ mutations := int(n % 10000)
+ concLevel := int(n%15 + 1)
+ vecLength := int(n%5 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sStreams := make([]chan float64, vecLength)
+ results := make([]chan float64, vecLength)
+ done := make(chan struct{})
+
+ for i := 0; i < vecLength; i++ {
+ sStreams[i] = make(chan float64, mutations*concLevel)
+ results[i] = make(chan float64)
+ go listenGaugeStream(sStreams[i], results[i], done)
+ }
+
+ go func() {
+ end.Wait()
+ close(done)
+ }()
+
+ gge := NewGaugeVec(
+ GaugeOpts{
+ Name: "test_gauge",
+ Help: "no help can be found here",
+ },
+ []string{"label"},
+ )
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ pick := make([]int, mutations)
+ for j := 0; j < mutations; j++ {
+ vals[j] = rand.Float64() - 0.5
+ pick[j] = rand.Intn(vecLength)
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for i, v := range vals {
+ sStreams[pick[i]] <- v
+ gge.WithLabelValues(string('A' + pick[i])).Add(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ start.Done()
+
+ for i := range sStreams {
+ if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
+ t.Fatalf("expected approx. %f, got %f", expected, got)
+ return false
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGaugeFunc(t *testing.T) {
+ gf := NewGaugeFunc(
+ GaugeOpts{
+ Name: "test_name",
+ Help: "test help",
+ ConstLabels: Labels{"a": "1", "b": "2"},
+ },
+ func() float64 { return 3.1415 },
+ )
+
+ if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+
+ m := &dto.Metric{}
+ gf.Write(m)
+
+ if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 000000000..abc9d4ec4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained by system. Sum of all system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes_total"),
+ "Total number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go
new file mode 100644
index 000000000..9a8858cbd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go
@@ -0,0 +1,123 @@
+package prometheus
+
+import (
+ "runtime"
+ "testing"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestGoCollector(t *testing.T) {
+ var (
+ c = NewGoCollector()
+ ch = make(chan Metric)
+ waitc = make(chan struct{})
+ closec = make(chan struct{})
+ old = -1
+ )
+ defer close(closec)
+
+ go func() {
+ c.Collect(ch)
+ go func(c <-chan struct{}) {
+ <-c
+ }(closec)
+ <-waitc
+ c.Collect(ch)
+ }()
+
+ for {
+ select {
+ case metric := <-ch:
+ switch m := metric.(type) {
+ // Attention, this also catches Counter...
+ case Gauge:
+ pb := &dto.Metric{}
+ m.Write(pb)
+ if pb.GetGauge() == nil {
+ continue
+ }
+
+ if old == -1 {
+ old = int(pb.GetGauge().GetValue())
+ close(waitc)
+ continue
+ }
+
+ if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
+ // TODO: This is flaky in highly concurrent situations.
+ t.Errorf("want 1 new goroutine, got %d", diff)
+ }
+
+ // GoCollector performs two sends per call.
+ // On line 27 we need to receive the second send
+ // to shut down cleanly.
+ <-ch
+ return
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("expected collect timed out")
+ }
+ }
+}
+
+func TestGCCollector(t *testing.T) {
+ var (
+ c = NewGoCollector()
+ ch = make(chan Metric)
+ waitc = make(chan struct{})
+ closec = make(chan struct{})
+ oldGC uint64
+ oldPause float64
+ )
+ defer close(closec)
+
+ go func() {
+ c.Collect(ch)
+ // force GC
+ runtime.GC()
+ <-waitc
+ c.Collect(ch)
+ }()
+
+ first := true
+ for {
+ select {
+ case metric := <-ch:
+ switch m := metric.(type) {
+ case *constSummary, *value:
+ pb := &dto.Metric{}
+ m.Write(pb)
+ if pb.GetSummary() == nil {
+ continue
+ }
+
+ if len(pb.GetSummary().Quantile) != 5 {
+ t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
+ }
+ for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
+ if *pb.GetSummary().Quantile[idx].Quantile != want {
+ t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
+ }
+ }
+ if first {
+ first = false
+ oldGC = *pb.GetSummary().SampleCount
+ oldPause = *pb.GetSummary().SampleSum
+ close(waitc)
+ continue
+ }
+ if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
+ t.Errorf("want 1 new garbage collection run, got %d", diff)
+ }
+ if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
+ t.Errorf("want moar pause, got %f", diff)
+ }
+ return
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("expected collect timed out")
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 000000000..9719e8fac
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,444 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ selfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go
new file mode 100644
index 000000000..d1242e08d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go
@@ -0,0 +1,326 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "math/rand"
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "testing/quick"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func benchmarkHistogramObserve(w int, b *testing.B) {
+ b.StopTimer()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(w)
+
+ g := new(sync.WaitGroup)
+ g.Add(1)
+
+ s := NewHistogram(HistogramOpts{})
+
+ for i := 0; i < w; i++ {
+ go func() {
+ g.Wait()
+
+ for i := 0; i < b.N; i++ {
+ s.Observe(float64(i))
+ }
+
+ wg.Done()
+ }()
+ }
+
+ b.StartTimer()
+ g.Done()
+ wg.Wait()
+}
+
+func BenchmarkHistogramObserve1(b *testing.B) {
+ benchmarkHistogramObserve(1, b)
+}
+
+func BenchmarkHistogramObserve2(b *testing.B) {
+ benchmarkHistogramObserve(2, b)
+}
+
+func BenchmarkHistogramObserve4(b *testing.B) {
+ benchmarkHistogramObserve(4, b)
+}
+
+func BenchmarkHistogramObserve8(b *testing.B) {
+ benchmarkHistogramObserve(8, b)
+}
+
+func benchmarkHistogramWrite(w int, b *testing.B) {
+ b.StopTimer()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(w)
+
+ g := new(sync.WaitGroup)
+ g.Add(1)
+
+ s := NewHistogram(HistogramOpts{})
+
+ for i := 0; i < 1000000; i++ {
+ s.Observe(float64(i))
+ }
+
+ for j := 0; j < w; j++ {
+ outs := make([]dto.Metric, b.N)
+
+ go func(o []dto.Metric) {
+ g.Wait()
+
+ for i := 0; i < b.N; i++ {
+ s.Write(&o[i])
+ }
+
+ wg.Done()
+ }(outs)
+ }
+
+ b.StartTimer()
+ g.Done()
+ wg.Wait()
+}
+
+func BenchmarkHistogramWrite1(b *testing.B) {
+ benchmarkHistogramWrite(1, b)
+}
+
+func BenchmarkHistogramWrite2(b *testing.B) {
+ benchmarkHistogramWrite(2, b)
+}
+
+func BenchmarkHistogramWrite4(b *testing.B) {
+ benchmarkHistogramWrite(4, b)
+}
+
+func BenchmarkHistogramWrite8(b *testing.B) {
+ benchmarkHistogramWrite(8, b)
+}
+
+// Intentionally adding +Inf here to test if that case is handled correctly.
+// Also, getCumulativeCounts depends on it.
+var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
+
+func TestHistogramConcurrency(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ }
+
+ rand.Seed(42)
+
+ it := func(n uint32) bool {
+ mutations := int(n%1e4 + 1e4)
+ concLevel := int(n%5 + 1)
+ total := mutations * concLevel
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sum := NewHistogram(HistogramOpts{
+ Name: "test_histogram",
+ Help: "helpless",
+ Buckets: testBuckets,
+ })
+
+ allVars := make([]float64, total)
+ var sampleSum float64
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ for j := 0; j < mutations; j++ {
+ v := rand.NormFloat64()
+ vals[j] = v
+ allVars[i*mutations+j] = v
+ sampleSum += v
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for _, v := range vals {
+ sum.Observe(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ sort.Float64s(allVars)
+ start.Done()
+ end.Wait()
+
+ m := &dto.Metric{}
+ sum.Write(m)
+ if got, want := int(*m.Histogram.SampleCount), total; got != want {
+ t.Errorf("got sample count %d, want %d", got, want)
+ }
+ if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
+ t.Errorf("got sample sum %f, want %f", got, want)
+ }
+
+ wantCounts := getCumulativeCounts(allVars)
+
+ if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
+ t.Errorf("got %d buckets in protobuf, want %d", got, want)
+ }
+ for i, wantBound := range testBuckets {
+ if i == len(testBuckets)-1 {
+ break // No +Inf bucket in protobuf.
+ }
+ if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {
+ t.Errorf("got bound %f, want %f", gotBound, wantBound)
+ }
+ if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {
+ t.Errorf("got count %d, want %d", gotCount, wantCount)
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestHistogramVecConcurrency(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ }
+
+ rand.Seed(42)
+
+ objectives := make([]float64, 0, len(DefObjectives))
+ for qu := range DefObjectives {
+
+ objectives = append(objectives, qu)
+ }
+ sort.Float64s(objectives)
+
+ it := func(n uint32) bool {
+ mutations := int(n%1e4 + 1e4)
+ concLevel := int(n%7 + 1)
+ vecLength := int(n%3 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ his := NewHistogramVec(
+ HistogramOpts{
+ Name: "test_histogram",
+ Help: "helpless",
+ Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},
+ },
+ []string{"label"},
+ )
+
+ allVars := make([][]float64, vecLength)
+ sampleSums := make([]float64, vecLength)
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ picks := make([]int, mutations)
+ for j := 0; j < mutations; j++ {
+ v := rand.NormFloat64()
+ vals[j] = v
+ pick := rand.Intn(vecLength)
+ picks[j] = pick
+ allVars[pick] = append(allVars[pick], v)
+ sampleSums[pick] += v
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for i, v := range vals {
+ his.WithLabelValues(string('A' + picks[i])).Observe(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ for _, vars := range allVars {
+ sort.Float64s(vars)
+ }
+ start.Done()
+ end.Wait()
+
+ for i := 0; i < vecLength; i++ {
+ m := &dto.Metric{}
+ s := his.WithLabelValues(string('A' + i))
+ s.Write(m)
+
+ if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
+ t.Errorf("got %d buckets in protobuf, want %d", got, want)
+ }
+ if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {
+ t.Errorf("got sample count %d, want %d", got, want)
+ }
+ if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
+ t.Errorf("got sample sum %f, want %f", got, want)
+ }
+
+ wantCounts := getCumulativeCounts(allVars[i])
+
+ for j, wantBound := range testBuckets {
+ if j == len(testBuckets)-1 {
+ break // No +Inf bucket in protobuf.
+ }
+ if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {
+ t.Errorf("got bound %f, want %f", gotBound, wantBound)
+ }
+ if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {
+ t.Errorf("got count %d, want %d", gotCount, wantCount)
+ }
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func getCumulativeCounts(vars []float64) []uint64 {
+ counts := make([]uint64, len(testBuckets))
+ for _, v := range vars {
+ for i := len(testBuckets) - 1; i >= 0; i-- {
+ if v > testBuckets[i] {
+ break
+ }
+ counts[i]++
+ }
+ }
+ return counts
+}
+
+func TestBuckets(t *testing.T) {
+ got := LinearBuckets(-15, 5, 6)
+ want := []float64{-15, -10, -5, 0, 5, 10}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("linear buckets: got %v, want %v", got, want)
+ }
+
+ got = ExponentialBuckets(100, 1.2, 3)
+ want = []float64{100, 120, 144}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("linear buckets: got %v, want %v", got, want)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 000000000..67ee5ac79
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,490 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is non instrumented).
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+// aggregation across multiple instances is required.
+//
+// - It uses microseconds as unit, which is deprecated and should be replaced by
+// seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+// calculator requires access to the request header, it creates a race with
+// any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example for a handler
+// performing such writes.
+//
+// Upcoming versions of this package will provide ways of instrumenting HTTP
+// handlers that are more flexible and have fewer issues. Please prefer direct
+// instrumentation in the meantime.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ // Request counter partitioned by the package-level instLabels; only
+ // Namespace, Subsystem, and ConstLabels are taken from opts here.
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ // opts was passed by value, so it is safe to mutate and reuse it for each
+ // of the three summaries below.
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ // Register-or-get so several handlers instrumented with identical opts can
+ // share the same underlying collectors.
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ // Estimate the request size concurrently with serving the request. The
+ // URL length is computed up front — presumably to keep the goroutine
+ // from touching r.URL while the handler runs; the goroutine still reads
+ // other request fields concurrently (a known wart of this deprecated API).
+ out := make(chan int)
+ urlLen := 0
+ if r.URL != nil {
+ urlLen = len(r.URL.String())
+ }
+ go computeApproximateRequestSize(r, out, urlLen)
+
+ // Forward the optional interfaces only when the wrapped writer supports
+ // all four of them; otherwise fall back to the plain delegator.
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ // Blocks until the size goroutine finishes.
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+}
+
+// responseWriterDelegator wraps an http.ResponseWriter to record the status
+// code and the number of body bytes written, for observation after the
+// wrapped handler returns.
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ // NOTE(review): handler and method are never set in this file —
+ // presumably vestigial or set elsewhere; confirm before relying on them.
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+// WriteHeader records the status code before delegating to the wrapped writer.
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+// Write records an implicit 200 on first write (mirroring net/http's
+// behavior) and accumulates the number of bytes successfully written.
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+// fancyResponseWriterDelegator additionally forwards the optional
+// http.CloseNotifier, http.Flusher, http.Hijacker, and io.ReaderFrom
+// interfaces. The caller must have verified that the wrapped ResponseWriter
+// implements all four (the type assertions below panic otherwise).
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+// ReadFrom mirrors Write: it records an implicit 200 and the bytes copied.
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
// sanitizeMethod maps an HTTP method name to its lowercase metric label
// value. The explicit cases cover the common methods with interned constants,
// avoiding the allocation strings.ToLower would incur on this hot path;
// anything else falls through to strings.ToLower.
func sanitizeMethod(m string) string {
	switch m {
	case "get", "GET":
		return "get"
	case "put", "PUT":
		return "put"
	case "head", "HEAD":
		return "head"
	case "post", "POST":
		return "post"
	case "delete", "DELETE":
		return "delete"
	case "connect", "CONNECT":
		return "connect"
	case "options", "OPTIONS":
		return "options"
	case "notify", "NOTIFY":
		return "notify"
	default:
		return strings.ToLower(m)
	}
}
+
+// sanitizeCode converts an HTTP status code into the string used as a metric
+// label value. The exhaustive switch returns interned string constants for
+// known codes — presumably to avoid the per-call allocation of strconv.Itoa
+// on this hot path; unknown codes fall back to strconv.Itoa.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ // Later additions (the RFC 6585 codes) were appended after the classic
+ // per-class groups rather than inserted in numeric order.
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go
new file mode 100644
index 000000000..ffe0418cf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go
@@ -0,0 +1,121 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// respBody is a trivial http.Handler that replies with status 418 (teapot)
+// and its own string value as the body; used as the handler under test.
+type respBody string
+
+func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusTeapot)
+ w.Write([]byte(b))
+}
+
+// TestInstrumentHandler stubs the package clock, serves one request through
+// the deprecated InstrumentHandler wrapper, and verifies both the delegated
+// response and the recorded duration/count metrics.
+func TestInstrumentHandler(t *testing.T) {
+ // Restore the package-level clock when the test finishes.
+ defer func(n nower) {
+ now = n.(nower)
+ }(now)
+
+ instant := time.Now()
+ end := instant.Add(30 * time.Second)
+ // nowSeries yields instant then end, making the measured latency
+ // deterministic. NOTE(review): nowSeries/nower are defined elsewhere in
+ // the package.
+ now = nowSeries(instant, end)
+ respBody := respBody("Howdy there!")
+
+ hndlr := InstrumentHandler("test-handler", respBody)
+
+ opts := SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": "test-handler"},
+ }
+
+ // Re-create the metrics InstrumentHandler registered so MustRegisterOrGet
+ // hands back the very same collectors for inspection below.
+ reqCnt := MustRegisterOrGet(NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )).(*CounterVec)
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ MustRegisterOrGet(NewSummary(opts))
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ MustRegisterOrGet(NewSummary(opts))
+
+ // Clear any counts left over from other tests sharing the registry.
+ reqCnt.Reset()
+
+ resp := httptest.NewRecorder()
+ req := &http.Request{
+ Method: "GET",
+ }
+
+ hndlr.ServeHTTP(resp, req)
+
+ if resp.Code != http.StatusTeapot {
+ t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
+ }
+ if string(resp.Body.Bytes()) != "Howdy there!" {
+ t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
+ }
+
+ out := &dto.Metric{}
+ reqDur.Write(out)
+ if want, got := "test-handler", out.Label[0].GetValue(); want != got {
+ t.Errorf("want label value %q in reqDur, got %q", want, got)
+ }
+ if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
+ t.Errorf("want sample count %d in reqDur, got %d", want, got)
+ }
+
+ out.Reset()
+ if want, got := 1, len(reqCnt.children); want != got {
+ t.Errorf("want %d children in reqCnt, got %d", want, got)
+ }
+ // Labels arrive sorted: code, handler (const), method.
+ cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cnt.Write(out)
+ if want, got := "418", out.Label[0].GetValue(); want != got {
+ t.Errorf("want label value %q in reqCnt, got %q", want, got)
+ }
+ if want, got := "test-handler", out.Label[1].GetValue(); want != got {
+ t.Errorf("want label value %q in reqCnt, got %q", want, got)
+ }
+ if want, got := "get", out.Label[2].GetValue(); want != got {
+ t.Errorf("want label value %q in reqCnt, got %q", want, got)
+ }
+ if out.Counter == nil {
+ t.Fatal("expected non-nil counter in reqCnt")
+ }
+ if want, got := 1., out.Counter.GetValue(); want != got {
+ t.Errorf("want reqCnt of %f, got %f", want, got)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 000000000..d4063d98f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// separatorByte is 255, which never occurs in valid UTF-8 text. NOTE(review):
+// its use sites are outside this excerpt — presumably it separates label
+// names/values when hashing; confirm where it is consumed.
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. (Implementers may find
+ // LabelPairSorter useful for that.) Callers of Write should still make
+ // sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises.)
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
// BuildFQName joins the given three name components by "_". Empty name
// components are ignored. If the name parameter itself is empty, an empty
// string is returned, no matter what. Metric implementations included in this
// library use this function internally to generate the fully-qualified metric
// name from the name component in their Opts. Users of the library will only
// need this function if they implement their own Metric or instantiate a Desc
// (with NewDesc) directly.
func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	// Collect the non-empty components in order and join them once.
	parts := make([]string, 0, 3)
	for _, component := range []string{namespace, subsystem, name} {
		if component != "" {
			parts = append(parts, component)
		}
	}
	return strings.Join(parts, "_")
}
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less orders label pairs lexicographically by label name.
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+// hashSorter sorts uint64 hash values in ascending order. NOTE(review): its
+// use sites are outside this excerpt.
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+// invalidMetric is a Metric whose Write always fails with a fixed error. See
+// NewInvalidMetric.
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go
new file mode 100644
index 000000000..7145f5e53
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go
@@ -0,0 +1,35 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "testing"
+
+func TestBuildFQName(t *testing.T) {
+ scenarios := []struct{ namespace, subsystem, name, result string }{
+ {"a", "b", "c", "a_b_c"},
+ {"", "b", "c", "b_c"},
+ {"a", "", "c", "a_c"},
+ {"", "", "c", "c"},
+ {"a", "b", "", ""},
+ {"a", "", "", ""},
+ {"", "b", "", ""},
+ {" ", "", "", ""},
+ }
+
+ for i, s := range scenarios {
+ if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {
+ t.Errorf("%d. want %s, got %s", i, want, got)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 000000000..e31e62e78
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+// processCollector exports process-level metrics (CPU, memory, file
+// descriptors, start time) read from procfs for one process.
+type processCollector struct {
+ pid int
+ // collectFn does the actual collection; it stays a no-op unless procfs
+ // is available at construction time (see NewProcessCollectorPIDFn).
+ collectFn func(chan<- Metric)
+ // pidFn resolves the target process ID on every collection.
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) Collector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) Collector {
+ c := processCollector{
+ pidFn: pidFn,
+ // Default to a no-op; replaced below only if procfs is usable.
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+// Delegates to collectFn, which is a no-op when procfs is unavailable.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+//
+// processCollect reads the current procfs state for the pid returned by
+// pidFn and emits one sample per readable metric. Errors are deliberately
+// swallowed (see TODO above): a value that cannot be read is simply not
+// reported on this collection cycle.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go
new file mode 100644
index 000000000..d3362dae7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go
@@ -0,0 +1,58 @@
+package prometheus
+
+import (
+ "bytes"
+ "os"
+ "regexp"
+ "testing"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/procfs"
+)
+
+// TestProcessCollector registers a default and a namespaced ("foobar")
+// process collector, gathers once, and checks the rendered text exposition
+// for plausible values of every exported metric. Skipped where procfs is
+// unavailable (e.g. non-Linux platforms).
+func TestProcessCollector(t *testing.T) {
+ if _, err := procfs.Self(); err != nil {
+ t.Skipf("skipping TestProcessCollector, procfs not available: %s", err)
+ }
+
+ registry := NewRegistry()
+ if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil {
+ t.Fatal(err)
+ }
+ if err := registry.Register(NewProcessCollectorPIDFn(
+ func() (int, error) { return os.Getpid(), nil }, "foobar"),
+ ); err != nil {
+ t.Fatal(err)
+ }
+
+ mfs, err := registry.Gather()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ for _, mf := range mfs {
+ if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Plausibility checks only — exact values depend on the running process.
+ for _, re := range []*regexp.Regexp{
+ regexp.MustCompile("process_cpu_seconds_total [0-9]"),
+ regexp.MustCompile("process_max_fds [1-9]"),
+ regexp.MustCompile("process_open_fds [1-9]"),
+ regexp.MustCompile("process_virtual_memory_bytes [1-9]"),
+ regexp.MustCompile("process_resident_memory_bytes [1-9]"),
+ regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"),
+ regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"),
+ regexp.MustCompile("foobar_process_max_fds [1-9]"),
+ regexp.MustCompile("foobar_process_open_fds [1-9]"),
+ regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"),
+ regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"),
+ regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"),
+ } {
+ if !re.Match(buf.Bytes()) {
+ t.Errorf("want body to match %s\n%s", re, buf.String())
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 000000000..b6dd5a266
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,201 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package promhttp contains functions to create http.Handler instances to
+// expose Prometheus metrics via HTTP. In later versions of this package, it
+// will also contain tooling to instrument instances of http.Handler and
+// http.RoundTripper.
+//
+// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
+// you can create a handler for a custom registry or anything that implements
+// the Gatherer interface. It also allows to create handlers that act
+// differently on errors or allow to log errors.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Standard HTTP header names used when serving the metrics response.
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+// bufPool recycles the scratch buffers used to assemble metric responses,
+// keeping allocation pressure low on a frequently scraped endpoint.
+var bufPool sync.Pool
+
+// getBuf returns a pooled buffer, or a fresh one when the pool is empty
+// (bufPool has no New function, so Get may return nil).
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+// giveBuf resets buf and returns it to the pool for reuse.
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+//
+// This is the handler typically mounted at a /metrics endpoint.
+func Handler() http.Handler {
+ return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered. HTTPErrorOnError is the zero value and therefore
+// the default for a zero HandlerOpts.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default: no logging, HTTP 500 on
+// the first error, compression enabled when the client asks for it.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
new file mode 100644
index 000000000..d4a7d4a7b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
@@ -0,0 +1,137 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package promhttp
+
+import (
+ "bytes"
+ "errors"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// errorCollector is a test helper whose Collect always emits an invalid
+// metric carrying a fixed error, forcing every Gather call to fail.
+type errorCollector struct{}
+
+func (e errorCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil)
+}
+
+func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
+ ch <- prometheus.NewInvalidMetric(
+ prometheus.NewDesc("invalid_metric", "not helpful", nil, nil),
+ errors.New("collect error"),
+ )
+}
+
+func TestHandlerErrorHandling(t *testing.T) {
+
+ // Create a registry that collects a MetricFamily with two elements,
+ // another with one, and reports an error.
+ reg := prometheus.NewRegistry()
+
+ cnt := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "the_count",
+ Help: "Ah-ah-ah! Thunder and lightning!",
+ })
+ reg.MustRegister(cnt)
+
+ cntVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+ cntVec.WithLabelValues("val1").Inc()
+ cntVec.WithLabelValues("val2").Inc()
+ reg.MustRegister(cntVec)
+
+ reg.MustRegister(errorCollector{})
+
+ logBuf := &bytes.Buffer{}
+ logger := log.New(logBuf, "", 0)
+
+ writer := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/", nil)
+ request.Header.Add("Accept", "test/plain")
+
+ errorHandler := HandlerFor(reg, HandlerOpts{
+ ErrorLog: logger,
+ ErrorHandling: HTTPErrorOnError,
+ })
+ continueHandler := HandlerFor(reg, HandlerOpts{
+ ErrorLog: logger,
+ ErrorHandling: ContinueOnError,
+ })
+ panicHandler := HandlerFor(reg, HandlerOpts{
+ ErrorLog: logger,
+ ErrorHandling: PanicOnError,
+ })
+ wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
+`
+ wantErrorBody := `An error has occurred during metrics gathering:
+
+error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
+`
+ wantOKBody := `# HELP name docstring
+# TYPE name counter
+name{constname="constvalue",labelname="val1"} 1
+name{constname="constvalue",labelname="val2"} 1
+# HELP the_count Ah-ah-ah! Thunder and lightning!
+# TYPE the_count counter
+the_count 0
+`
+
+ errorHandler.ServeHTTP(writer, request)
+ if got, want := writer.Code, http.StatusInternalServerError; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+ if got := logBuf.String(); got != wantMsg {
+ t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
+ }
+ if got := writer.Body.String(); got != wantErrorBody {
+ t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
+ }
+ logBuf.Reset()
+ writer.Body.Reset()
+ writer.Code = http.StatusOK
+
+ continueHandler.ServeHTTP(writer, request)
+ if got, want := writer.Code, http.StatusOK; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+ if got := logBuf.String(); got != wantMsg {
+ t.Errorf("got log message %q, want %q", got, wantMsg)
+ }
+ if got := writer.Body.String(); got != wantOKBody {
+ t.Errorf("got body %q, want %q", got, wantOKBody)
+ }
+
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("expected panic from panicHandler")
+ }
+ }()
+ panicHandler.ServeHTTP(writer, request)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go
new file mode 100644
index 000000000..7f17ca291
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package push_test
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/push"
+)
+
+func ExampleCollectors() {
+ completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_completion_timestamp_seconds",
+ Help: "The timestamp of the last succesful completion of a DB backup.",
+ })
+ completionTime.Set(float64(time.Now().Unix()))
+ if err := push.Collectors(
+ "db_backup", push.HostnameGroupingKey(),
+ "http://pushgateway:9091",
+ completionTime,
+ ); err != nil {
+ fmt.Println("Could not push completion time to Pushgateway:", err)
+ }
+}
+
+func ExampleRegistry() {
+ registry := prometheus.NewRegistry()
+
+ completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_completion_timestamp_seconds",
+ Help: "The timestamp of the last succesful completion of a DB backup.",
+ })
+ registry.MustRegister(completionTime)
+
+ completionTime.Set(float64(time.Now().Unix()))
+ if err := push.FromGatherer(
+ "db_backup", push.HostnameGroupingKey(),
+ "http://pushgateway:9091",
+ registry,
+ ); err != nil {
+ fmt.Println("Could not push completion time to Pushgateway:", err)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go
new file mode 100644
index 000000000..ae40402f8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go
@@ -0,0 +1,172 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package push provides functions to push metrics to a Pushgateway. The metrics
+// to push are either collected from a provided registry, or from explicitly
+// listed collectors.
+//
+// See the documentation of the Pushgateway to understand the meaning of the
+// grouping parameters and the differences between push.Registry and
+// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors
+// on the other hand: https://github.com/prometheus/pushgateway
+package push
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const contentTypeHeader = "Content-Type"
+
+// FromGatherer triggers a metric collection by the provided Gatherer (which is
+// usually implemented by a prometheus.Registry) and pushes all gathered metrics
+// to the Pushgateway specified by url, using the provided job name and the
+// (optional) further grouping labels (the grouping map may be nil). See the
+// Pushgateway documentation for detailed implications of the job and other
+// grouping labels. Neither the job name nor any grouping label value may
+// contain a "/". The metrics pushed must not contain a job label of their own
+// nor any of the grouping labels.
+//
+// You can use just host:port or ip:port as url, in which case 'http://' is
+// added automatically. You can also include the schema in the URL. However, do
+// not include the '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and other grouping
+// labels will be replaced with the metrics pushed by this call. (It uses HTTP
+// method 'PUT' to push to the Pushgateway.)
+func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
+ return push(job, grouping, url, g, "PUT")
+}
+
+// AddFromGatherer works like FromGatherer, but only previously pushed metrics
+// with the same name (and the same job and other grouping labels) will be
+// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
+func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
+ return push(job, grouping, url, g, "POST")
+}
+
+func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ if strings.HasSuffix(pushURL, "/") {
+ pushURL = pushURL[:len(pushURL)-1]
+ }
+
+ if strings.Contains(job, "/") {
+ return fmt.Errorf("job contains '/': %s", job)
+ }
+ urlComponents := []string{url.QueryEscape(job)}
+ for ln, lv := range grouping {
+ if !model.LabelNameRE.MatchString(ln) {
+ return fmt.Errorf("grouping label has invalid name: %s", ln)
+ }
+ if strings.Contains(lv, "/") {
+ return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
+ }
+ urlComponents = append(urlComponents, ln, lv)
+ }
+ pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
+
+ mfs, err := g.Gather()
+ if err != nil {
+ return err
+ }
+ buf := &bytes.Buffer{}
+ enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
+ // Check for pre-existing grouping labels:
+ for _, mf := range mfs {
+ for _, m := range mf.GetMetric() {
+ for _, l := range m.GetLabel() {
+ if l.GetName() == "job" {
+ return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
+ }
+ if _, ok := grouping[l.GetName()]; ok {
+ return fmt.Errorf(
+ "pushed metric %s (%s) already contains grouping label %s",
+ mf.GetName(), m, l.GetName(),
+ )
+ }
+ }
+ }
+ enc.Encode(mf)
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 202 {
+ body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
+ return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
+ }
+ return nil
+}
+
+// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
+// it collects from the provided collectors directly. It is a convenient way to
+// push only a few metrics.
+func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
+ return pushCollectors(job, grouping, url, "PUT", collectors...)
+}
+
+// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
+// Instead, it collects from the provided collectors directly. It is a
+// convenient way to push only a few metrics.
+func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
+ return pushCollectors(job, grouping, url, "POST", collectors...)
+}
+
+func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
+ r := prometheus.NewRegistry()
+ for _, collector := range collectors {
+ if err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return push(job, grouping, url, r, method)
+}
+
+// HostnameGroupingKey returns a label map with the only entry
+// {instance="<hostname>"}. This can be conveniently used as the grouping
+// parameter if metrics should be pushed with the hostname as label. The
+// returned map is created upon each call so that the caller is free to add more
+// labels to the map.
+func HostnameGroupingKey() map[string]string {
+ hostname, err := os.Hostname()
+ if err != nil {
+ return map[string]string{"instance": "unknown"}
+ }
+ return map[string]string{"instance": hostname}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go
new file mode 100644
index 000000000..28ed9b74b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go
@@ -0,0 +1,176 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package push
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestPush(t *testing.T) {
+
+ var (
+ lastMethod string
+ lastBody []byte
+ lastPath string
+ )
+
+ host, err := os.Hostname()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Fake a Pushgateway that always responds with 202.
+ pgwOK := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ lastMethod = r.Method
+ var err error
+ lastBody, err = ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ lastPath = r.URL.EscapedPath()
+ w.Header().Set("Content-Type", `text/plain; charset=utf-8`)
+ w.WriteHeader(http.StatusAccepted)
+ }),
+ )
+ defer pgwOK.Close()
+
+ // Fake a Pushgateway that always responds with 500.
+ pgwErr := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "fake error", http.StatusInternalServerError)
+ }),
+ )
+ defer pgwErr.Close()
+
+ metric1 := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "testname1",
+ Help: "testhelp1",
+ })
+ metric2 := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "testname2",
+ Help: "testhelp2",
+ ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"},
+ })
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(metric1)
+ reg.MustRegister(metric2)
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf := &bytes.Buffer{}
+ enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
+
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ t.Fatal(err)
+ }
+ }
+ wantBody := buf.Bytes()
+
+ // PushCollectors, all good.
+ if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "PUT" {
+ t.Error("want method PUT for PushCollectors, got", lastMethod)
+ }
+ if bytes.Compare(lastBody, wantBody) != 0 {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+ if lastPath != "/metrics/job/testjob/instance/"+host {
+ t.Error("unexpected path:", lastPath)
+ }
+
+ // PushAddCollectors, with nil grouping, all good.
+ if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "POST" {
+ t.Error("want method POST for PushAddCollectors, got", lastMethod)
+ }
+ if bytes.Compare(lastBody, wantBody) != 0 {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+ if lastPath != "/metrics/job/testjob" {
+ t.Error("unexpected path:", lastPath)
+ }
+
+ // PushCollectors with a broken PGW.
+ if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push to broken Pushgateway succeeded")
+ } else {
+ if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
+ t.Errorf("got error %q, want %q", got, want)
+ }
+ }
+
+ // PushCollectors with invalid grouping or job.
+ if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with grouping contained in metrics succeeded")
+ }
+ if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with invalid job value succeeded")
+ }
+ if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with invalid grouping succeeded")
+ }
+ if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with invalid grouping succeeded")
+ }
+
+ // Push registry, all good.
+ if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "PUT" {
+ t.Error("want method PUT for Push, got", lastMethod)
+ }
+ if bytes.Compare(lastBody, wantBody) != 0 {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+
+ // PushAdd registry, all good.
+ if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "POST" {
+ t.Error("want method POSTT for PushAdd, got", lastMethod)
+ }
+ if bytes.Compare(lastBody, wantBody) != 0 {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+ if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
+ t.Error("unexpected path:", lastPath)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 000000000..32a3986b0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,806 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach to keep default instances
+// as global state mirrors the approach of other packages in the Go standard
+// library. Note that there are caveats. Change the variables with caution and
+// only if you understand the consequences. Users who want to avoid global state
+// altogether should not use the convenience function and act on custom
+// instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementation
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // It is in general not safe to register the same Collector multiple
+ // times concurrently.
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered.
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of MetricFamily protobufs. Even if an error occurs, Gather attempts
+ // to gather as many metrics as possible. Hence, if a non-nil error is
+ // returned, the returned MetricFamily slice could be nil (in case of a
+ // fatal error that prevented any meaningful metric collection) or
+ // contain a number of MetricFamily protobufs, some of which might be
+ // incomplete, and some might be missing altogether. The returned error
+ // (which might be a MultiError) explains the details. In scenarios
+ // where complete collection is critical, the returned MetricFamily
+ // protobufs should be disregarded if the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
+// returns the Collector, unless an equal Collector was registered before, in
+// which case that Collector is returned.
+//
+// Deprecated: RegisterOrGet is merely a convenience function for the
+// implementation as described in the documentation for
+// AlreadyRegisteredError. As the use case is relatively rare, this function
+// will be removed in a future version of this package to clean up the
+// namespace.
+func RegisterOrGet(c Collector) (Collector, error) {
+ if err := Register(c); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ return are.ExistingCollector, nil
+ }
+ return nil, err
+ }
+ return c, nil
+}
+
+// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
+// an error.
+//
+// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
+// there for details.
+func MustRegisterOrGet(c Collector) Collector {
+ c, err := RegisterOrGet(c)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
+// gathers from the previous DefaultGatherers but then merges the MetricFamily
+// protobufs returned from the provided hook function with the MetricFamily
+// protobufs returned from the original DefaultGatherer.
+//
+// Deprecated: This function manipulates the DefaultGatherer variable. Consider
+// the implications, i.e. don't do this concurrently with any uses of the
+// DefaultGatherer. In the rare cases where you need to inject MetricFamily
+// protobufs directly, it is recommended to use a custom Registry and combine it
+// with a custom Gatherer using the Gatherers type (see
+// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
+// with previous versions of this package.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ DefaultGatherer = Gatherers{
+ DefaultGatherer,
+ GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
+ }
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs is 1). In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+	// Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for _ = range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it required locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "error collecting metric %v: %s", desc, err,
+ ))
+ continue
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok {
+ if metricFamily.GetHelp() != desc.help {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ ))
+ continue
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else {
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ errs = append(errs, fmt.Errorf(
+ "empty metric collected: %s", dtoMetric,
+ ))
+ continue
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ if r.pedanticChecksEnabled {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ ))
+ continue
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
// Gatherers can be used to merge the Gather results from multiple
// Registries. It also provides a way to directly inject existing MetricFamily
// protobufs into the gathering by creating a custom Gatherer with a Gather
// method that simply returns the existing MetricFamily protobufs. Note that no
// registration is involved (in contrast to Collector registration), so
// obviously registration-time checks cannot happen. Any inconsistencies between
// the gathered MetricFamilies are reported as errors by the Gather method, and
// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
// (e.g. syntactically invalid metric or label names) will go undetected.
type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice whith empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
// added. Otherwise, an error is returned if the existing dimHash is not equal
// to the calculated dimHash. Note that this function mutates all three of its
// map/slice arguments (it sorts dtoMetric.Label in place).
func checkMetricConsistency(
	metricFamily *dto.MetricFamily,
	dtoMetric *dto.Metric,
	metricHashes map[uint64]struct{},
	dimHashes map[string]uint64,
) error {
	// Type consistency with metric family.
	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
		return fmt.Errorf(
			"collected metric %s %s is not a %s",
			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
		)
	}

	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
	// h hashes family name plus label values (metric identity); dh hashes
	// the label names only (the family's label dimensions).
	h := hashNew()
	h = hashAdd(h, metricFamily.GetName())
	h = hashAddByte(h, separatorByte)
	dh := hashNew()
	// Make sure label pairs are sorted. We depend on it for the consistency
	// check.
	sort.Sort(LabelPairSorter(dtoMetric.Label))
	for _, lp := range dtoMetric.Label {
		h = hashAdd(h, lp.GetValue())
		h = hashAddByte(h, separatorByte)
		dh = hashAdd(dh, lp.GetName())
		dh = hashAddByte(dh, separatorByte)
	}
	if _, exists := metricHashes[h]; exists {
		return fmt.Errorf(
			"collected metric %s %s was collected before with the same name and label values",
			metricFamily.GetName(), dtoMetric,
		)
	}
	if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
		if dimHash != dh {
			return fmt.Errorf(
				"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
				metricFamily.GetName(), dtoMetric,
			)
		}
	} else {
		// First metric of this family fixes the family's dimensions.
		dimHashes[metricFamily.GetName()] = dh
	}
	metricHashes[h] = struct{}{}
	return nil
}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
new file mode 100644
index 000000000..9dacb6256
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
@@ -0,0 +1,545 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus_test
+
+import (
+ "bytes"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
// testHandler exercises the metrics HTTP handler against a table of
// Accept-header scenarios, checking both the negotiated Content-Type header
// and the exact response body for the text, delimited-protobuf, proto-text,
// and proto-compact-text encodings, with and without externally injected
// MetricFamilies.
func testHandler(t testing.TB) {

	metricVec := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "name",
			Help: "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
		},
		[]string{"labelname"},
	)

	metricVec.WithLabelValues("val1").Inc()
	metricVec.WithLabelValues("val2").Inc()

	// A MetricFamily fed into the handler via a Gatherers merge rather than
	// via Collector registration.
	externalMetricFamily := &dto.MetricFamily{
		Name: proto.String("externalname"),
		Help: proto.String("externaldocstring"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{
				Label: []*dto.LabelPair{
					{
						Name: proto.String("externalconstname"),
						Value: proto.String("externalconstvalue"),
					},
					{
						Name: proto.String("externallabelname"),
						Value: proto.String("externalval1"),
					},
				},
				Counter: &dto.Counter{
					Value: proto.Float64(1),
				},
			},
		},
	}
	externalBuf := &bytes.Buffer{}
	enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim)
	if err := enc.Encode(externalMetricFamily); err != nil {
		t.Fatal(err)
	}
	// Expected renderings of the external family in each exposition format.
	externalMetricFamilyAsBytes := externalBuf.Bytes()
	externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring
# TYPE externalname counter
externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1
`)
	externalMetricFamilyAsProtoText := []byte(`name: "externalname"
help: "externaldocstring"
type: COUNTER
metric: <
  label: <
    name: "externalconstname"
    value: "externalconstvalue"
  >
  label: <
    name: "externallabelname"
    value: "externalval1"
  >
  counter: <
    value: 1
  >
>

`)
	externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > >
`)

	// The family produced by the registered metricVec above, and its
	// expected renderings in each exposition format.
	expectedMetricFamily := &dto.MetricFamily{
		Name: proto.String("name"),
		Help: proto.String("docstring"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{
				Label: []*dto.LabelPair{
					{
						Name: proto.String("constname"),
						Value: proto.String("constvalue"),
					},
					{
						Name: proto.String("labelname"),
						Value: proto.String("val1"),
					},
				},
				Counter: &dto.Counter{
					Value: proto.Float64(1),
				},
			},
			{
				Label: []*dto.LabelPair{
					{
						Name: proto.String("constname"),
						Value: proto.String("constvalue"),
					},
					{
						Name: proto.String("labelname"),
						Value: proto.String("val2"),
					},
				},
				Counter: &dto.Counter{
					Value: proto.Float64(1),
				},
			},
		},
	}
	buf := &bytes.Buffer{}
	enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
	if err := enc.Encode(expectedMetricFamily); err != nil {
		t.Fatal(err)
	}
	expectedMetricFamilyAsBytes := buf.Bytes()
	expectedMetricFamilyAsText := []byte(`# HELP name docstring
# TYPE name counter
name{constname="constvalue",labelname="val1"} 1
name{constname="constvalue",labelname="val2"} 1
`)
	expectedMetricFamilyAsProtoText := []byte(`name: "name"
help: "docstring"
type: COUNTER
metric: <
  label: <
    name: "constname"
    value: "constvalue"
  >
  label: <
    name: "labelname"
    value: "val1"
  >
  counter: <
    value: 1
  >
>
metric: <
  label: <
    name: "constname"
    value: "constvalue"
  >
  label: <
    name: "labelname"
    value: "val2"
  >
  counter: <
    value: 1
  >
>

`)
	expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)

	// An injected family that shares the name "name" with the registered
	// family; used to verify merging of same-named families.
	externalMetricFamilyWithSameName := &dto.MetricFamily{
		Name: proto.String("name"),
		Help: proto.String("docstring"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{
				Label: []*dto.LabelPair{
					{
						Name: proto.String("constname"),
						Value: proto.String("constvalue"),
					},
					{
						Name: proto.String("labelname"),
						Value: proto.String("different_val"),
					},
				},
				Counter: &dto.Counter{
					Value: proto.Float64(42),
				},
			},
		},
	}

	expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)

	// output is the expected HTTP response for one scenario.
	type output struct {
		headers map[string]string
		body []byte
	}

	// Each scenario pairs a set of request headers with the expected
	// response, plus the collector to register and/or the external
	// MetricFamilies to inject for that request.
	var scenarios = []struct {
		headers map[string]string
		out output
		collector prometheus.Collector
		externalMF []*dto.MetricFamily
	}{
		{ // 0
			headers: map[string]string{
				"Accept": "foo/bar;q=0.2, dings/bums;q=0.8",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: []byte{},
			},
		},
		{ // 1
			headers: map[string]string{
				"Accept": "foo/bar;q=0.2, application/quark;q=0.8",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: []byte{},
			},
		},
		{ // 2
			headers: map[string]string{
				"Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: []byte{},
			},
		},
		{ // 3
			headers: map[string]string{
				"Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
				},
				body: []byte{},
			},
		},
		{ // 4
			headers: map[string]string{
				"Accept": "application/json",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: expectedMetricFamilyAsText,
			},
			collector: metricVec,
		},
		{ // 5
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
				},
				body: expectedMetricFamilyAsBytes,
			},
			collector: metricVec,
		},
		{ // 6
			headers: map[string]string{
				"Accept": "application/json",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: externalMetricFamilyAsText,
			},
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 7
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
				},
				body: externalMetricFamilyAsBytes,
			},
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 8
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
				},
				body: bytes.Join(
					[][]byte{
						externalMetricFamilyAsBytes,
						expectedMetricFamilyAsBytes,
					},
					[]byte{},
				),
			},
			collector: metricVec,
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 9
			headers: map[string]string{
				"Accept": "text/plain",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: []byte{},
			},
		},
		{ // 10
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: expectedMetricFamilyAsText,
			},
			collector: metricVec,
		},
		{ // 11
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `text/plain; version=0.0.4`,
				},
				body: bytes.Join(
					[][]byte{
						externalMetricFamilyAsText,
						expectedMetricFamilyAsText,
					},
					[]byte{},
				),
			},
			collector: metricVec,
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 12
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
				},
				body: bytes.Join(
					[][]byte{
						externalMetricFamilyAsBytes,
						expectedMetricFamilyAsBytes,
					},
					[]byte{},
				),
			},
			collector: metricVec,
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 13
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`,
				},
				body: bytes.Join(
					[][]byte{
						externalMetricFamilyAsProtoText,
						expectedMetricFamilyAsProtoText,
					},
					[]byte{},
				),
			},
			collector: metricVec,
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 14
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
				},
				body: bytes.Join(
					[][]byte{
						externalMetricFamilyAsProtoCompactText,
						expectedMetricFamilyAsProtoCompactText,
					},
					[]byte{},
				),
			},
			collector: metricVec,
			externalMF: []*dto.MetricFamily{externalMetricFamily},
		},
		{ // 15
			headers: map[string]string{
				"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
			},
			out: output{
				headers: map[string]string{
					"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
				},
				body: bytes.Join(
					[][]byte{
						externalMetricFamilyAsProtoCompactText,
						expectedMetricFamilyMergedWithExternalAsProtoCompactText,
					},
					[]byte{},
				),
			},
			collector: metricVec,
			externalMF: []*dto.MetricFamily{
				externalMetricFamily,
				externalMetricFamilyWithSameName,
			},
		},
	}
	// Each scenario runs against a fresh pedantic registry, optionally
	// wrapped in a Gatherers merge when external families are injected.
	for i, scenario := range scenarios {
		registry := prometheus.NewPedanticRegistry()
		gatherer := prometheus.Gatherer(registry)
		if scenario.externalMF != nil {
			gatherer = prometheus.Gatherers{
				registry,
				prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
					return scenario.externalMF, nil
				}),
			}
		}

		if scenario.collector != nil {
			registry.Register(scenario.collector)
		}
		writer := httptest.NewRecorder()
		handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
		request, _ := http.NewRequest("GET", "/", nil)
		for key, value := range scenario.headers {
			request.Header.Add(key, value)
		}
		handler(writer, request)

		for key, value := range scenario.out.headers {
			if writer.HeaderMap.Get(key) != value {
				t.Errorf(
					"%d. expected %q for header %q, got %q",
					i, value, key, writer.Header().Get(key),
				)
			}
		}

		if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) {
			t.Errorf(
				"%d. expected body:\n%s\ngot body:\n%s\n",
				i, scenario.out.body, writer.Body.Bytes(),
			)
		}
	}
}
+
// TestHandler runs the full scenario table in testHandler once.
func TestHandler(t *testing.T) {
	testHandler(t)
}
+
// BenchmarkHandler measures the cost of one full pass over the testHandler
// scenario table.
func BenchmarkHandler(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testHandler(b)
	}
}
+
+func TestRegisterWithOrGet(t *testing.T) {
+ // Replace the default registerer just to be sure. This is bad, but this
+ // whole test will go away once RegisterOrGet is removed.
+ oldRegisterer := prometheus.DefaultRegisterer
+ defer func() {
+ prometheus.DefaultRegisterer = oldRegisterer
+ }()
+ prometheus.DefaultRegisterer = prometheus.NewRegistry()
+ original := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "test",
+ Help: "help",
+ },
+ []string{"foo", "bar"},
+ )
+ equalButNotSame := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "test",
+ Help: "help",
+ },
+ []string{"foo", "bar"},
+ )
+ if err := prometheus.Register(original); err != nil {
+ t.Fatal(err)
+ }
+ if err := prometheus.Register(equalButNotSame); err == nil {
+ t.Fatal("expected error when registringe equal collector")
+ }
+ existing, err := prometheus.RegisterOrGet(equalButNotSame)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if existing != original {
+ t.Error("expected original collector but got something else")
+ }
+ if existing == equalButNotSame {
+ t.Error("expected original callector but got new one")
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 000000000..bce05bf9a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,534 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
// quantileLabel is used for the label that defines the quantile in a
// summary. It is reserved: newSummary panics if it appears among a summary's
// variable or constant labels.
const quantileLabel = "quantile"
+
// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary; for a summary partitioned by
// variable labels, use NewSummaryVec.
type Summary interface {
	Metric
	Collector

	// Observe adds a single observation to the summary.
	Observe(float64)
}
+
// DefObjectives are the default Summary quantile values. Each key is a target
// quantile rank, each value the allowed absolute error for that rank.
// NOTE(review): as a package-level var this map is technically mutable;
// treat it as read-only.
var (
	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

	// errQuantileLabelNotAllowed is the panic value used when a summary is
	// constructed with the reserved "quantile" label (see newSummary).
	errQuantileLabelNotAllowed = fmt.Errorf(
		"%q is not allowed as label name in summaries", quantileLabel,
	)
)
+
// Default values for SummaryOpts, applied by newSummary when the
// corresponding option is left at its zero value.
const (
	// DefMaxAge is the default duration for which observations stay
	// relevant.
	DefMaxAge time.Duration = 10 * time.Minute
	// DefAgeBuckets is the default number of buckets used to calculate the
	// age of observations.
	DefAgeBuckets = 5
	// DefBufCap is the standard buffer size for collecting Summary
	// observations.
	DefBufCap = 500
)
+
// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Summary must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Summary. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this
	// Summary. Summaries with the same fully-qualified name must have the
	// same label names in their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a
	// SummaryVec. ConstLabels serve only special purposes. One is for the
	// special case where the value of a label does not change during the
	// lifetime of a process, e.g. if the revision of the running binary is
	// put into a label. Another, more advanced purpose is if more than one
	// Collector needs to collect Summaries with the same fully-qualified
	// name. In that case, those Summaries must differ in the values of
	// their ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported
	// for q will be the φ-quantile value for some φ between q-e and q+e.
	// The default value is DefObjectives.
	Objectives map[float64]float64

	// MaxAge defines the duration for which an observation stays relevant
	// for the summary. Must be positive. The default value is DefMaxAge.
	MaxAge time.Duration

	// AgeBuckets is the number of buckets used to exclude observations that
	// are older than MaxAge from the summary. A higher number has a
	// resource penalty, so only increase it if the higher resolution is
	// really required. For very high observation rates, you might want to
	// reduce the number of age buckets. With only one age bucket, you will
	// effectively see a complete reset of the summary each time MaxAge has
	// passed. The default value is DefAgeBuckets.
	AgeBuckets uint32

	// BufCap defines the default sample stream buffer size. The default
	// value of DefBufCap should suffice for most uses. If there is a need
	// to increase the value, a multiple of 500 is recommended (because that
	// is the internal buffer size of the underlying package
	// "github.com/beorn7/perks/quantile").
	BufCap uint32
}
+
+// Problem with the sliding-window decay algorithm: the Merge method of
+// perks/quantile does not work as advertised - and it may well be
+// unfixable, as the underlying algorithm is apparently not capable of
+// merging summaries in the first place. To avoid using Merge, we are
+// currently adding observations to _each_ age bucket, i.e. the effort to
+// add a sample is essentially multiplied by the number of age buckets. When
+// rotating age buckets, we empty the previous head stream. On scrape time,
+// we simply take the quantiles from the head stream (no merging
+// required). Result: More effort on observation time, less effort on scrape
+// time, which is exactly the opposite of what we try to accomplish, but at
+// least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
// newSummary is the internal constructor shared by NewSummary and
// SummaryVec. It validates the options, fills in defaults, and wires up the
// age-bucketed quantile streams. It panics if the number of labelValues does
// not match desc's variable labels, if the reserved "quantile" label is
// used, or if MaxAge is negative.
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}

	// The "quantile" label is reserved for the quantile dimension of the
	// serialized summary, so it may appear neither as a variable nor as a
	// constant label.
	for _, n := range desc.variableLabels {
		if n == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}

	// Apply defaults for all zero-valued options.
	if len(opts.Objectives) == 0 {
		opts.Objectives = DefObjectives
	}

	if opts.MaxAge < 0 {
		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
	}
	if opts.MaxAge == 0 {
		opts.MaxAge = DefMaxAge
	}

	if opts.AgeBuckets == 0 {
		opts.AgeBuckets = DefAgeBuckets
	}

	if opts.BufCap == 0 {
		opts.BufCap = DefBufCap
	}

	s := &summary{
		desc: desc,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

		labelPairs: makeLabelPairs(desc, labelValues),

		hotBuf:         make([]float64, 0, opts.BufCap),
		coldBuf:        make([]float64, 0, opts.BufCap),
		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
	}
	// Head stream and hot buffer start with the same expiration time; see
	// maybeRotateStreams for how they are kept in sync afterwards.
	s.headStreamExpTime = time.Now().Add(s.streamDuration)
	s.hotBufExpTime = s.headStreamExpTime

	for i := uint32(0); i < opts.AgeBuckets; i++ {
		s.streams = append(s.streams, s.newStream())
	}
	s.headStream = s.streams[0]

	// Keep a sorted copy of the objective ranks for deterministic ordering
	// of the quantiles emitted by Write.
	for qu := range s.objectives {
		s.sortedObjectives = append(s.sortedObjectives, qu)
	}
	sort.Float64s(s.sortedObjectives)

	s.init(s) // Init self-collection.
	return s
}
+
// summary implements Summary with a set of age-bucketed quantile streams
// (beorn7/perks) fed through a double-buffered observation pipeline
// (hotBuf/coldBuf).
type summary struct {
	selfCollector

	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
	mtx    sync.Mutex // Protects every other moving part.
	// Lock bufMtx before mtx if both are needed.

	desc *Desc

	// objectives maps quantile ranks to their allowed absolute error;
	// sortedObjectives holds the ranks in ascending order for
	// deterministic output in Write.
	objectives       map[float64]float64
	sortedObjectives []float64

	labelPairs []*dto.LabelPair

	// Running totals over all observations ever flushed; unlike the
	// streams, these are never reset.
	sum float64
	cnt uint64

	// Observations are appended to hotBuf; swapBufs exchanges it with the
	// (empty) coldBuf, which flushColdBuf then drains into the streams.
	hotBuf, coldBuf []float64

	streams                          []*quantile.Stream
	streamDuration                   time.Duration
	headStream                       *quantile.Stream
	headStreamIdx                    int
	headStreamExpTime, hotBufExpTime time.Time
}
+
// Desc returns the descriptor of the summary.
func (s *summary) Desc() *Desc {
	return s.desc
}

// Observe adds a single observation to the summary. The value is appended to
// the hot buffer; a flush to the quantile streams is triggered asynchronously
// when the buffer's expiration time has passed or the buffer is full.
func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
	s.hotBuf = append(s.hotBuf, v)
	if len(s.hotBuf) == cap(s.hotBuf) {
		s.asyncFlush(now)
	}
}
+
// Write serializes the summary's current state into out. It first swaps and
// flushes the observation buffers so all pending observations are reflected,
// then reports count, sum, and the quantiles of the current head stream.
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			// No observations in the current window: report NaN.
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}
+
// newStream creates one quantile stream targeted at the summary's
// objectives.
func (s *summary) newStream() *quantile.Stream {
	return quantile.NewTargeted(s.objectives)
}

// asyncFlush needs bufMtx locked. It swaps the buffers synchronously and
// drains the cold buffer on a background goroutine, holding mtx until that
// drain has finished.
func (s *summary) asyncFlush(now time.Time) {
	s.mtx.Lock()
	s.swapBufs(now)

	// Unblock the original goroutine that was responsible for the mutation
	// that triggered the compaction. But hold onto the global non-buffer
	// state mutex until the operation finishes.
	go func() {
		s.flushColdBuf()
		s.mtx.Unlock()
	}()
}

// maybeRotateStreams needs mtx AND bufMtx locked. It advances the head
// stream (resetting each stream it leaves behind) until the head stream's
// expiration time has caught up with the hot buffer's expiration time.
func (s *summary) maybeRotateStreams() {
	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
		s.headStream.Reset()
		s.headStreamIdx++
		if s.headStreamIdx >= len(s.streams) {
			s.headStreamIdx = 0
		}
		s.headStream = s.streams[s.headStreamIdx]
		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
	}
}

// flushColdBuf needs mtx locked. Every buffered value is inserted into
// _every_ age-bucket stream (see the comment block above NewSummary on why
// merging is not an option), the running count and sum are updated, the cold
// buffer is emptied, and finally the streams are rotated if due.
func (s *summary) flushColdBuf() {
	for _, v := range s.coldBuf {
		for _, stream := range s.streams {
			stream.Insert(v)
		}
		s.cnt++
		s.sum += v
	}
	s.coldBuf = s.coldBuf[0:0]
	s.maybeRotateStreams()
}

// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. It exchanges
// the hot and cold buffers and advances the hot buffer's expiration time to
// the next stream-duration boundary after now.
func (s *summary) swapBufs(now time.Time) {
	if len(s.coldBuf) != 0 {
		panic("coldBuf is not empty")
	}
	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
	// hotBuf is now empty and gets new expiration set.
	for now.After(s.hotBufExpTime) {
		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
	}
}
+
// quantSort implements sort.Interface to order a []*dto.Quantile by
// ascending quantile rank.
type quantSort []*dto.Quantile

func (s quantSort) Len() int {
	return len(s)
}

func (s quantSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s quantSort) Less(i, j int) bool {
	return s[i].GetQuantile() < s[j].GetQuantile()
}
+
// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
	*MetricVec
}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
	return m.MetricVec.WithLabelValues(lvs...).(Summary)
}

// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *SummaryVec) With(labels Labels) Summary {
	return m.MetricVec.With(labels).(Summary)
}
+
// constSummary is the immutable Metric returned by NewConstSummary. All
// fields are fixed at construction time.
type constSummary struct {
	desc       *Desc
	count      uint64
	sum        float64
	quantiles  map[float64]float64 // Maps quantile ranks to values.
	labelPairs []*dto.LabelPair
}

// Desc returns the descriptor of the summary.
func (s *constSummary) Desc() *Desc {
	return s.desc
}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
func MustNewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) Metric {
	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go
new file mode 100644
index 000000000..c4575ffbd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go
@@ -0,0 +1,347 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "testing"
+ "testing/quick"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
// benchmarkSummaryObserve measures concurrent Observe calls on one Summary
// with w observer goroutines. The g WaitGroup gates all observers so they
// start together, after the benchmark timer has been restarted; only the
// observation work is timed.
func benchmarkSummaryObserve(w int, b *testing.B) {
	b.StopTimer()

	wg := new(sync.WaitGroup)
	wg.Add(w)

	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewSummary(SummaryOpts{})

	for i := 0; i < w; i++ {
		go func() {
			g.Wait() // Wait for the starting gun.

			for i := 0; i < b.N; i++ {
				s.Observe(float64(i))
			}

			wg.Done()
		}()
	}

	b.StartTimer()
	g.Done() // Release all observers at once.
	wg.Wait()
}
+
// BenchmarkSummaryObserve1 benchmarks Observe with 1 concurrent observer.
func BenchmarkSummaryObserve1(b *testing.B) {
	benchmarkSummaryObserve(1, b)
}

// BenchmarkSummaryObserve2 benchmarks Observe with 2 concurrent observers.
func BenchmarkSummaryObserve2(b *testing.B) {
	benchmarkSummaryObserve(2, b)
}

// BenchmarkSummaryObserve4 benchmarks Observe with 4 concurrent observers.
func BenchmarkSummaryObserve4(b *testing.B) {
	benchmarkSummaryObserve(4, b)
}

// BenchmarkSummaryObserve8 benchmarks Observe with 8 concurrent observers.
func BenchmarkSummaryObserve8(b *testing.B) {
	benchmarkSummaryObserve(8, b)
}
+
// benchmarkSummaryWrite measures concurrent Write calls on one Summary with
// w writer goroutines. The summary is pre-populated with one million
// observations before the timer starts; each goroutine writes into its own
// pre-allocated metric slice so no allocation is timed.
func benchmarkSummaryWrite(w int, b *testing.B) {
	b.StopTimer()

	wg := new(sync.WaitGroup)
	wg.Add(w)

	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewSummary(SummaryOpts{})

	for i := 0; i < 1000000; i++ {
		s.Observe(float64(i))
	}

	for j := 0; j < w; j++ {
		outs := make([]dto.Metric, b.N)

		go func(o []dto.Metric) {
			g.Wait() // Wait for the starting gun.

			for i := 0; i < b.N; i++ {
				s.Write(&o[i])
			}

			wg.Done()
		}(outs)
	}

	b.StartTimer()
	g.Done() // Release all writers at once.
	wg.Wait()
}
+
// BenchmarkSummaryWrite1 benchmarks Write with 1 concurrent writer.
func BenchmarkSummaryWrite1(b *testing.B) {
	benchmarkSummaryWrite(1, b)
}

// BenchmarkSummaryWrite2 benchmarks Write with 2 concurrent writers.
func BenchmarkSummaryWrite2(b *testing.B) {
	benchmarkSummaryWrite(2, b)
}

// BenchmarkSummaryWrite4 benchmarks Write with 4 concurrent writers.
func BenchmarkSummaryWrite4(b *testing.B) {
	benchmarkSummaryWrite(4, b)
}

// BenchmarkSummaryWrite8 benchmarks Write with 8 concurrent writers.
func BenchmarkSummaryWrite8(b *testing.B) {
	benchmarkSummaryWrite(8, b)
}
+
// TestSummaryConcurrency observes random values from several goroutines
// concurrently and then checks that the reported sample count, sample sum,
// and quantiles are consistent with the full set of observations (within the
// error allowance of each objective; see getBounds).
func TestSummaryConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	it := func(n uint32) bool {
		// Derive workload size and concurrency from the quick.Check input.
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%5 + 1)
		total := mutations * concLevel

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewSummary(SummaryOpts{
			Name: "test_summary",
			Help: "helpless",
		})

		allVars := make([]float64, total)
		var sampleSum float64
		// Pre-generate all values so the expected sum and quantile bounds
		// can be computed outside the goroutines.
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				allVars[i*mutations+j] = v
				sampleSum += v
			}

			go func(vals []float64) {
				start.Wait() // Block until all goroutines are ready.
				for _, v := range vals {
					sum.Observe(v)
				}
				end.Done()
			}(vals)
		}
		sort.Float64s(allVars)
		start.Done() // Release all observers at once.
		end.Wait()

		m := &dto.Metric{}
		sum.Write(m)
		if got, want := int(*m.Summary.SampleCount), total; got != want {
			t.Errorf("got sample count %d, want %d", got, want)
		}
		// Compare sums with a relative tolerance to absorb float
		// accumulation-order differences.
		if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
			t.Errorf("got sample sum %f, want %f", got, want)
		}

		objectives := make([]float64, 0, len(DefObjectives))
		for qu := range DefObjectives {
			objectives = append(objectives, qu)
		}
		sort.Float64s(objectives)

		for i, wantQ := range objectives {
			ε := DefObjectives[wantQ]
			gotQ := *m.Summary.Quantile[i].Quantile
			gotV := *m.Summary.Quantile[i].Value
			min, max := getBounds(allVars, wantQ, ε)
			if gotQ != wantQ {
				t.Errorf("got quantile %f, want %f", gotQ, wantQ)
			}
			if gotV < min || gotV > max {
				t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}
+
// TestSummaryVecConcurrency is the SummaryVec analogue of
// TestSummaryConcurrency: observations are scattered randomly over the
// children of a vec (label values "A", "B", ...) by concurrent goroutines,
// then count, sum, and quantiles are verified per child.
func TestSummaryVecConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	objectives := make([]float64, 0, len(DefObjectives))
	for qu := range DefObjectives {
		objectives = append(objectives, qu)
	}
	sort.Float64s(objectives)

	it := func(n uint32) bool {
		// Derive workload size, concurrency, and vec width from the
		// quick.Check input.
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%7 + 1)
		vecLength := int(n%3 + 1)

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewSummaryVec(
			SummaryOpts{
				Name: "test_summary",
				Help: "helpless",
			},
			[]string{"label"},
		)

		allVars := make([][]float64, vecLength)
		sampleSums := make([]float64, vecLength)
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			picks := make([]int, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				pick := rand.Intn(vecLength)
				picks[j] = pick
				allVars[pick] = append(allVars[pick], v)
				sampleSums[pick] += v
			}

			// picks is declared inside the loop body, so each goroutine
			// closes over its own slice.
			go func(vals []float64) {
				start.Wait() // Block until all goroutines are ready.
				for i, v := range vals {
					sum.WithLabelValues(string('A' + picks[i])).Observe(v)
				}
				end.Done()
			}(vals)
		}
		for _, vars := range allVars {
			sort.Float64s(vars)
		}
		start.Done() // Release all observers at once.
		end.Wait()

		for i := 0; i < vecLength; i++ {
			m := &dto.Metric{}
			s := sum.WithLabelValues(string('A' + i))
			s.Write(m)
			if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
				t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
			}
			if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
				t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
			}
			for j, wantQ := range objectives {
				ε := DefObjectives[wantQ]
				gotQ := *m.Summary.Quantile[j].Quantile
				gotV := *m.Summary.Quantile[j].Value
				min, max := getBounds(allVars[i], wantQ, ε)
				if gotQ != wantQ {
					t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
				}
				if gotV < min || gotV > max {
					t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
				}
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}
+
+func TestSummaryDecay(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ // More because it depends on timing than because it is particularly long...
+ }
+
+ sum := NewSummary(SummaryOpts{
+ Name: "test_summary",
+ Help: "helpless",
+ MaxAge: 100 * time.Millisecond,
+ Objectives: map[float64]float64{0.1: 0.001},
+ AgeBuckets: 10,
+ })
+
+ m := &dto.Metric{}
+ i := 0
+ tick := time.NewTicker(time.Millisecond)
+ for _ = range tick.C {
+ i++
+ sum.Observe(float64(i))
+ if i%10 == 0 {
+ sum.Write(m)
+ if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
+ t.Errorf("%d. got %f, want %f", i, got, want)
+ }
+ m.Reset()
+ }
+ if i >= 1000 {
+ break
+ }
+ }
+ tick.Stop()
+ // Wait for MaxAge without observations and make sure quantiles are NaN.
+ time.Sleep(100 * time.Millisecond)
+ sum.Write(m)
+ if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
+ t.Errorf("got %f, want NaN after expiration", got)
+ }
+}
+
// getBounds returns the interval of sorted sample values into which a
// φ-quantile estimate for rank q with tolerance ε may legitimately fall.
//
// TODO(beorn7): This currently tolerates an error of up to 2*ε. The
// error must be at most ε, but for some reason, it's sometimes slightly
// higher. That's a bug.
func getBounds(vars []float64, q, ε float64) (min, max float64) {
	count := float64(len(vars))
	lo := int((q - 2*ε) * count)
	hi := int(math.Ceil((q + 2*ε) * count))
	if lo > 1 {
		min = vars[lo-1]
	} else {
		min = vars[0]
	}
	if hi < len(vars) {
		max = vars[hi-1]
	} else {
		max = vars[len(vars)-1]
	}
	return min, max
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 000000000..5faf7e6e3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,138 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// An Untyped metric works the same as a Gauge. The only difference is that no
// type information is implied.
//
// To create Untyped instances, use NewUntyped.
type Untyped interface {
	Metric
	Collector

	// Set sets the Untyped metric to an arbitrary value.
	Set(float64)
	// Inc increments the Untyped metric by 1.
	Inc()
	// Dec decrements the Untyped metric by 1.
	Dec()
	// Add adds the given value to the Untyped metric. (The value can be
	// negative, resulting in a decrease.)
	Add(float64)
	// Sub subtracts the given value from the Untyped metric. (The value can
	// be negative, resulting in an increase.)
	Sub(float64)
}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
// UntypedVec is a Collector that bundles a set of Untyped metrics that all
// share the same Desc, but have different values for their variable
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
	*MetricVec
}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
}

// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *UntypedVec) With(labels Labels) Untyped {
	return m.MetricVec.With(labels).(Untyped)
}
+
// UntypedFunc is an Untyped whose value is determined at collect time by
// calling a provided function.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
	Metric
	Collector
}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 000000000..a944c3775
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc             // Immutable description shared by Desc() and Write().
+	valType    ValueType         // Determines which dto field Write populates.
+	labelPairs []*dto.LabelPair  // Pre-computed at construction; never mutated afterwards.
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+// makeLabelPairs returns the dto.LabelPairs for the given Desc and label
+// values: one pair per variable label (in Desc order, paired positionally
+// with labelValues) followed by the pre-computed constant label pairs, the
+// whole result sorted by label name. labelValues must have exactly one entry
+// per variable label in desc — callers validate cardinality beforehand.
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+	if totalLen == 0 {
+		// Super fast path.
+		return nil
+	}
+	if len(desc.variableLabels) == 0 {
+		// Moderately fast path: only const labels, already sorted in Desc.
+		return desc.constLabelPairs
+	}
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
+	for i, n := range desc.variableLabels {
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(labelValues[i]),
+		})
+	}
+	// Idiomatic variadic append instead of an element-by-element copy loop.
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(LabelPairSorter(labelPairs))
+	return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 000000000..7f3eef9a4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,404 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects the children.
+ children map[uint64][]metricWithLabelValues
+ desc *Desc
+
+ newMetric func(labelValues ...string) Metric
+ hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ children: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// Describe implements Collector. It sends exactly one Desc — the Desc shared
+// by all metrics in this vector — on the provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+	ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.children {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabels(h, labels), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+
+ i := m.findMetricWithLabelValues(metrics, lvs)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric. Must be called while holding the write lock.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+	metrics, ok := m.children[h]
+	if !ok {
+		return false
+	}
+	i := m.findMetricWithLabels(metrics, labels)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		// Last metric in the bucket: drop the bucket entirely.
+		delete(m.children, h)
+	}
+	return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, val := range vals {
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// It takes the read lock for the (common) lookup and upgrades to the write
+// lock only if the metric has to be created.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabelValues(hash, lvs)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	// Re-check under the write lock: another goroutine may have created the
+	// metric between the RUnlock above and the Lock here.
+	metric, ok = m.getMetricWithLabelValues(hash, lvs)
+	if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+		copiedLVs := make([]string, len(lvs))
+		copy(copiedLVs, lvs)
+		metric = m.newMetric(copiedLVs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label map or
+// creates it and returns the new one.
+//
+// It takes the read lock for the (common) lookup and upgrades to the write
+// lock only if the metric has to be created.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabels(hash, labels)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	// Re-check under the write lock: another goroutine may have created the
+	// metric in the meantime.
+	metric, ok = m.getMetricWithLabels(hash, labels)
+	if !ok {
+		lvs := m.extractLabelValues(labels)
+		metric = m.newMetric(lvs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+ for i, metric := range metrics {
+ if m.matchLabelValues(metric.values, lvs) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+ for i, metric := range metrics {
+ if m.matchLabels(metric.values, labels) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+ if len(values) != len(lvs) {
+ return false
+ }
+ for i, v := range values {
+ if v != lvs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+ if len(labels) != len(values) {
+ return false
+ }
+ for i, k := range m.desc.variableLabels {
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+ labelValues := make([]string, len(labels))
+ for i, k := range m.desc.variableLabels {
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go
new file mode 100644
index 000000000..445a6b39f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go
@@ -0,0 +1,312 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestDelete(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ testDelete(t, vec)
+}
+
+func TestDeleteWithCollisions(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testDelete(t, vec)
+}
+
+func testDelete(t *testing.T, vec *UntypedVec) {
+ if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+}
+
+func TestDeleteLabelValues(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ testDeleteLabelValues(t, vec)
+}
+
+func TestDeleteLabelValuesWithCollisions(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testDeleteLabelValues(t, vec)
+}
+
+func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
+ if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision.
+ if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ // Delete out of order.
+ if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.DeleteLabelValues("v1"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+}
+
+func TestMetricVec(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ testMetricVec(t, vec)
+}
+
+func TestMetricVecWithCollisions(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testMetricVec(t, vec)
+}
+
+// testMetricVec exercises Reset, WithLabelValues, and the per-child counts of
+// an UntypedVec: it increments 20 distinct label pairs (i%4 × i%5) plus one
+// fixed pair 1000 times each, then verifies every child metric's value and
+// label association, and finally that Reset empties the vector.
+func testMetricVec(t *testing.T, vec *UntypedVec) {
+	vec.Reset() // Actually test Reset now!
+
+	var pair [2]string
+	// Keep track of metrics.
+	expected := map[[2]string]int{}
+
+	for i := 0; i < 1000; i++ {
+		pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying label combinations.
+		expected[pair]++
+		vec.WithLabelValues(pair[0], pair[1]).Inc()
+
+		expected[[2]string{"v1", "v2"}]++
+		// UntypedVec.WithLabelValues already returns Untyped; the previous
+		// .(Untyped) assertion here was redundant.
+		vec.WithLabelValues("v1", "v2").Inc()
+	}
+
+	var total int
+	for _, metrics := range vec.children {
+		for _, metric := range metrics {
+			total++
+			copy(pair[:], metric.values)
+
+			var metricOut dto.Metric
+			if err := metric.metric.Write(&metricOut); err != nil {
+				t.Fatal(err)
+			}
+			actual := *metricOut.Untyped.Value
+
+			var actualPair [2]string
+			for i, label := range metricOut.Label {
+				actualPair[i] = *label.Value
+			}
+
+			// Test output pair against metric.values to ensure we've selected
+			// the right one. We check this to ensure the below check means
+			// anything at all.
+			if actualPair != pair {
+				t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair)
+			}
+
+			if actual != float64(expected[pair]) {
+				t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair])
+			}
+		}
+	}
+
+	if total != len(expected) {
+		t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected))
+	}
+
+	vec.Reset()
+
+	if len(vec.children) > 0 {
+		t.Fatalf("reset failed")
+	}
+}
+
+func TestCounterVecEndToEndWithCollision(t *testing.T) {
+ vec := NewCounterVec(
+ CounterOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"labelname"},
+ )
+ vec.WithLabelValues("77kepQFQ8Kl").Inc()
+ vec.WithLabelValues("!0IC=VloaY").Add(2)
+
+ m := &dto.Metric{}
+ if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil {
+ t.Fatal(err)
+ }
+ if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want {
+ t.Errorf("got label value %q, want %q", got, want)
+ }
+ if got, want := m.GetCounter().GetValue(), 1.; got != want {
+ t.Errorf("got value %f, want %f", got, want)
+ }
+ m.Reset()
+ if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil {
+ t.Fatal(err)
+ }
+ if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want {
+ t.Errorf("got label value %q, want %q", got, want)
+ }
+ if got, want := m.GetCounter().GetValue(), 2.; got != want {
+ t.Errorf("got value %f, want %f", got, want)
+ }
+}
+
+// BenchmarkMetricVecWithLabelValuesBasic benchmarks the minimal case: two
+// label keys with a single candidate value each.
+func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {
+	// Element types are implied by the map type (gofmt -s form).
+	benchmarkMetricVecWithLabelValues(b, map[string][]string{
+		"l1": {"onevalue"},
+		"l2": {"twovalue"},
+	})
+}
+
+func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)
+}
+
+func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)
+}
+
+func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)
+}
+
+func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)
+}
+
+func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)
+}
+
+func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) {
+ labels := map[string][]string{}
+
+ for i := 0; i < nkeys; i++ {
+ var (
+ k = fmt.Sprintf("key-%v", i)
+ vs = make([]string, 0, nvalues)
+ )
+ for j := 0; j < nvalues; j++ {
+ vs = append(vs, fmt.Sprintf("value-%v", j))
+ }
+ labels[k] = vs
+ }
+
+ benchmarkMetricVecWithLabelValues(b, labels)
+}
+
+// benchmarkMetricVecWithLabelValues benchmarks WithLabelValues lookups on an
+// UntypedVec built from the given label-name → candidate-values map, cycling
+// through the candidates so successive iterations hit different children.
+func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {
+	var keys []string
+	for k := range labels { // Map order dependent, who cares though.
+		keys = append(keys, k)
+	}
+
+	values := make([]string, len(labels)) // Value cache for permutations.
+	vec := NewUntypedVec(
+		UntypedOpts{
+			Name: "test",
+			Help: "helpless",
+		},
+		keys,
+	)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Varies input across provided map entries based on key size.
+		for j, k := range keys {
+			candidates := labels[k]
+			values[j] = candidates[i%len(candidates)]
+		}
+
+		vec.WithLabelValues(values...)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_model/.gitignore b/vendor/github.com/prometheus/client_model/.gitignore
new file mode 100644
index 000000000..2f7896d1d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/.gitignore
@@ -0,0 +1 @@
+target/
diff --git a/vendor/github.com/prometheus/client_model/AUTHORS.md b/vendor/github.com/prometheus/client_model/AUTHORS.md
new file mode 100644
index 000000000..e8b3efa6a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/AUTHORS.md
@@ -0,0 +1,13 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein <beorn@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Björn Rabenstein <beorn@soundcloud.com>
+* Matt T. Proud <matt.proud@gmail.com>
+* Tobias Schmidt <ts@soundcloud.com>
diff --git a/vendor/github.com/prometheus/client_model/CONTRIBUTING.md b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md
new file mode 100644
index 000000000..573d58741
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines for the Go parts are the [Go Code Review
+  Comments](https://github.com/golang/go/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/Makefile b/vendor/github.com/prometheus/client_model/Makefile
new file mode 100644
index 000000000..9cc23b340
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/Makefile
@@ -0,0 +1,61 @@
+# Copyright 2013 Prometheus Team
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+KEY_ID ?= _DEFINE_ME_
+
+all: cpp go java python ruby
+
+SUFFIXES:
+
+cpp: cpp/metrics.pb.cc cpp/metrics.pb.h
+
+cpp/metrics.pb.cc: metrics.proto
+ protoc $< --cpp_out=cpp/
+
+cpp/metrics.pb.h: metrics.proto
+ protoc $< --cpp_out=cpp/
+
+go: go/metrics.pb.go
+
+go/metrics.pb.go: metrics.proto
+ protoc $< --go_out=go/
+
+java: src/main/java/io/prometheus/client/Metrics.java pom.xml
+ mvn clean compile package
+
+src/main/java/io/prometheus/client/Metrics.java: metrics.proto
+ protoc $< --java_out=src/main/java
+
+python: python/prometheus/client/model/metrics_pb2.py
+
+python/prometheus/client/model/metrics_pb2.py: metrics.proto
+ protoc $< --python_out=python/prometheus/client/model
+
+ruby:
+ $(MAKE) -C ruby build
+
+clean:
+ -rm -rf cpp/*
+ -rm -rf go/*
+ -rm -rf java/*
+ -rm -rf python/*
+ -$(MAKE) -C ruby clean
+ -mvn clean
+
+maven-deploy-snapshot: java
+ mvn clean deploy -Dgpg.keyname=$(KEY_ID) -DperformRelease=true
+
+maven-deploy-release: java
+ mvn clean release:clean release:prepare release:perform -Dgpg.keyname=$(KEY_ID) -DperformRelease=true
+
+.PHONY: all clean cpp go java maven-deploy-snapshot maven-deploy-release python ruby
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 000000000..20110e410
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md
new file mode 100644
index 000000000..a710042db
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/README.md
@@ -0,0 +1,26 @@
+# Background
+Under most circumstances, manually downloading this repository should never
+be required.
+
+# Prerequisites
+# Base
+* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)
+
+## Java
+* [Apache Maven](http://maven.apache.org)
+* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository
+
+## Go
+* [Go](http://golang.org)
+* [goprotobuf](https://code.google.com/p/goprotobuf)
+
+## Ruby
+* [Ruby](https://www.ruby-lang.org)
+* [bundler](https://rubygems.org/gems/bundler)
+
+# Building
+ $ make
+
+# Getting Started
+ * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).
+ * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/client_model/cpp/metrics.pb.cc b/vendor/github.com/prometheus/client_model/cpp/metrics.pb.cc
new file mode 100644
index 000000000..1ff893b83
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/cpp/metrics.pb.cc
@@ -0,0 +1,3380 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: metrics.proto
+
+#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
+#include "metrics.pb.h"
+
+#include <algorithm>
+
+#include <google/protobuf/stubs/common.h>
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/wire_format_lite_inl.h>
+#include <google/protobuf/descriptor.h>
+#include <google/protobuf/generated_message_reflection.h>
+#include <google/protobuf/reflection_ops.h>
+#include <google/protobuf/wire_format.h>
+// @@protoc_insertion_point(includes)
+
+namespace io {
+namespace prometheus {
+namespace client {
+
+namespace {
+
+const ::google::protobuf::Descriptor* LabelPair_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ LabelPair_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Gauge_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Gauge_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Counter_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Counter_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Quantile_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Quantile_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Summary_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Summary_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Untyped_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Untyped_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Histogram_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Histogram_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Bucket_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Bucket_reflection_ = NULL;
+const ::google::protobuf::Descriptor* Metric_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ Metric_reflection_ = NULL;
+const ::google::protobuf::Descriptor* MetricFamily_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+ MetricFamily_reflection_ = NULL;
+const ::google::protobuf::EnumDescriptor* MetricType_descriptor_ = NULL;
+
+} // namespace
+
+
+void protobuf_AssignDesc_metrics_2eproto() {
+ protobuf_AddDesc_metrics_2eproto();
+ const ::google::protobuf::FileDescriptor* file =
+ ::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(
+ "metrics.proto");
+ GOOGLE_CHECK(file != NULL);
+ LabelPair_descriptor_ = file->message_type(0);
+ static const int LabelPair_offsets_[2] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, name_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, value_),
+ };
+ LabelPair_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ LabelPair_descriptor_,
+ LabelPair::default_instance_,
+ LabelPair_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(LabelPair));
+ Gauge_descriptor_ = file->message_type(1);
+ static const int Gauge_offsets_[1] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Gauge, value_),
+ };
+ Gauge_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Gauge_descriptor_,
+ Gauge::default_instance_,
+ Gauge_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Gauge, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Gauge, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Gauge));
+ Counter_descriptor_ = file->message_type(2);
+ static const int Counter_offsets_[1] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Counter, value_),
+ };
+ Counter_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Counter_descriptor_,
+ Counter::default_instance_,
+ Counter_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Counter, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Counter, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Counter));
+ Quantile_descriptor_ = file->message_type(3);
+ static const int Quantile_offsets_[2] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, quantile_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, value_),
+ };
+ Quantile_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Quantile_descriptor_,
+ Quantile::default_instance_,
+ Quantile_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Quantile));
+ Summary_descriptor_ = file->message_type(4);
+ static const int Summary_offsets_[3] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, sample_count_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, sample_sum_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, quantile_),
+ };
+ Summary_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Summary_descriptor_,
+ Summary::default_instance_,
+ Summary_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Summary));
+ Untyped_descriptor_ = file->message_type(5);
+ static const int Untyped_offsets_[1] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Untyped, value_),
+ };
+ Untyped_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Untyped_descriptor_,
+ Untyped::default_instance_,
+ Untyped_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Untyped, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Untyped, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Untyped));
+ Histogram_descriptor_ = file->message_type(6);
+ static const int Histogram_offsets_[3] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, sample_count_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, sample_sum_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, bucket_),
+ };
+ Histogram_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Histogram_descriptor_,
+ Histogram::default_instance_,
+ Histogram_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Histogram));
+ Bucket_descriptor_ = file->message_type(7);
+ static const int Bucket_offsets_[2] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, cumulative_count_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, upper_bound_),
+ };
+ Bucket_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Bucket_descriptor_,
+ Bucket::default_instance_,
+ Bucket_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Bucket));
+ Metric_descriptor_ = file->message_type(8);
+ static const int Metric_offsets_[7] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, label_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, gauge_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, counter_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, summary_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, untyped_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, histogram_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, timestamp_ms_),
+ };
+ Metric_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ Metric_descriptor_,
+ Metric::default_instance_,
+ Metric_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(Metric));
+ MetricFamily_descriptor_ = file->message_type(9);
+ static const int MetricFamily_offsets_[4] = {
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, name_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, help_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, type_),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, metric_),
+ };
+ MetricFamily_reflection_ =
+ new ::google::protobuf::internal::GeneratedMessageReflection(
+ MetricFamily_descriptor_,
+ MetricFamily::default_instance_,
+ MetricFamily_offsets_,
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, _has_bits_[0]),
+ GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, _unknown_fields_),
+ -1,
+ ::google::protobuf::DescriptorPool::generated_pool(),
+ ::google::protobuf::MessageFactory::generated_factory(),
+ sizeof(MetricFamily));
+ MetricType_descriptor_ = file->enum_type(0);
+}
+
+namespace {
+
+GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);
+inline void protobuf_AssignDescriptorsOnce() {
+ ::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,
+ &protobuf_AssignDesc_metrics_2eproto);
+}
+
+void protobuf_RegisterTypes(const ::std::string&) {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ LabelPair_descriptor_, &LabelPair::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Gauge_descriptor_, &Gauge::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Counter_descriptor_, &Counter::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Quantile_descriptor_, &Quantile::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Summary_descriptor_, &Summary::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Untyped_descriptor_, &Untyped::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Histogram_descriptor_, &Histogram::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Bucket_descriptor_, &Bucket::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ Metric_descriptor_, &Metric::default_instance());
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+ MetricFamily_descriptor_, &MetricFamily::default_instance());
+}
+
+} // namespace
+
+void protobuf_ShutdownFile_metrics_2eproto() {
+ delete LabelPair::default_instance_;
+ delete LabelPair_reflection_;
+ delete Gauge::default_instance_;
+ delete Gauge_reflection_;
+ delete Counter::default_instance_;
+ delete Counter_reflection_;
+ delete Quantile::default_instance_;
+ delete Quantile_reflection_;
+ delete Summary::default_instance_;
+ delete Summary_reflection_;
+ delete Untyped::default_instance_;
+ delete Untyped_reflection_;
+ delete Histogram::default_instance_;
+ delete Histogram_reflection_;
+ delete Bucket::default_instance_;
+ delete Bucket_reflection_;
+ delete Metric::default_instance_;
+ delete Metric_reflection_;
+ delete MetricFamily::default_instance_;
+ delete MetricFamily_reflection_;
+}
+
+void protobuf_AddDesc_metrics_2eproto() {
+ static bool already_here = false;
+ if (already_here) return;
+ already_here = true;
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ ::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
+ "\n\rmetrics.proto\022\024io.prometheus.client\"(\n"
+ "\tLabelPair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\""
+ "\026\n\005Gauge\022\r\n\005value\030\001 \001(\001\"\030\n\007Counter\022\r\n\005va"
+ "lue\030\001 \001(\001\"+\n\010Quantile\022\020\n\010quantile\030\001 \001(\001\022"
+ "\r\n\005value\030\002 \001(\001\"e\n\007Summary\022\024\n\014sample_coun"
+ "t\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\0220\n\010quantile\030"
+ "\003 \003(\0132\036.io.prometheus.client.Quantile\"\030\n"
+ "\007Untyped\022\r\n\005value\030\001 \001(\001\"c\n\tHistogram\022\024\n\014"
+ "sample_count\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\022,"
+ "\n\006bucket\030\003 \003(\0132\034.io.prometheus.client.Bu"
+ "cket\"7\n\006Bucket\022\030\n\020cumulative_count\030\001 \001(\004"
+ "\022\023\n\013upper_bound\030\002 \001(\001\"\276\002\n\006Metric\022.\n\005labe"
+ "l\030\001 \003(\0132\037.io.prometheus.client.LabelPair"
+ "\022*\n\005gauge\030\002 \001(\0132\033.io.prometheus.client.G"
+ "auge\022.\n\007counter\030\003 \001(\0132\035.io.prometheus.cl"
+ "ient.Counter\022.\n\007summary\030\004 \001(\0132\035.io.prome"
+ "theus.client.Summary\022.\n\007untyped\030\005 \001(\0132\035."
+ "io.prometheus.client.Untyped\0222\n\thistogra"
+ "m\030\007 \001(\0132\037.io.prometheus.client.Histogram"
+ "\022\024\n\014timestamp_ms\030\006 \001(\003\"\210\001\n\014MetricFamily\022"
+ "\014\n\004name\030\001 \001(\t\022\014\n\004help\030\002 \001(\t\022.\n\004type\030\003 \001("
+ "\0162 .io.prometheus.client.MetricType\022,\n\006m"
+ "etric\030\004 \003(\0132\034.io.prometheus.client.Metri"
+ "c*M\n\nMetricType\022\013\n\007COUNTER\020\000\022\t\n\005GAUGE\020\001\022"
+ "\013\n\007SUMMARY\020\002\022\013\n\007UNTYPED\020\003\022\r\n\tHISTOGRAM\020\004"
+ "B\026\n\024io.prometheus.client", 1024);
+ ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
+ "metrics.proto", &protobuf_RegisterTypes);
+ LabelPair::default_instance_ = new LabelPair();
+ Gauge::default_instance_ = new Gauge();
+ Counter::default_instance_ = new Counter();
+ Quantile::default_instance_ = new Quantile();
+ Summary::default_instance_ = new Summary();
+ Untyped::default_instance_ = new Untyped();
+ Histogram::default_instance_ = new Histogram();
+ Bucket::default_instance_ = new Bucket();
+ Metric::default_instance_ = new Metric();
+ MetricFamily::default_instance_ = new MetricFamily();
+ LabelPair::default_instance_->InitAsDefaultInstance();
+ Gauge::default_instance_->InitAsDefaultInstance();
+ Counter::default_instance_->InitAsDefaultInstance();
+ Quantile::default_instance_->InitAsDefaultInstance();
+ Summary::default_instance_->InitAsDefaultInstance();
+ Untyped::default_instance_->InitAsDefaultInstance();
+ Histogram::default_instance_->InitAsDefaultInstance();
+ Bucket::default_instance_->InitAsDefaultInstance();
+ Metric::default_instance_->InitAsDefaultInstance();
+ MetricFamily::default_instance_->InitAsDefaultInstance();
+ ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_metrics_2eproto);
+}
+
+// Force AddDescriptors() to be called at static initialization time.
+struct StaticDescriptorInitializer_metrics_2eproto {
+ StaticDescriptorInitializer_metrics_2eproto() {
+ protobuf_AddDesc_metrics_2eproto();
+ }
+} static_descriptor_initializer_metrics_2eproto_;
+const ::google::protobuf::EnumDescriptor* MetricType_descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return MetricType_descriptor_;
+}
+bool MetricType_IsValid(int value) {
+ switch(value) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int LabelPair::kNameFieldNumber;
+const int LabelPair::kValueFieldNumber;
+#endif // !_MSC_VER
+
+LabelPair::LabelPair()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.LabelPair)
+}
+
+void LabelPair::InitAsDefaultInstance() {
+}
+
+LabelPair::LabelPair(const LabelPair& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.LabelPair)
+}
+
+void LabelPair::SharedCtor() {
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ value_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+LabelPair::~LabelPair() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.LabelPair)
+ SharedDtor();
+}
+
+void LabelPair::SharedDtor() {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete name_;
+ }
+ if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete value_;
+ }
+ if (this != default_instance_) {
+ }
+}
+
+void LabelPair::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* LabelPair::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return LabelPair_descriptor_;
+}
+
+const LabelPair& LabelPair::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+LabelPair* LabelPair::default_instance_ = NULL;
+
+LabelPair* LabelPair::New() const {
+ return new LabelPair;
+}
+
+void LabelPair::Clear() {
+ if (_has_bits_[0 / 32] & 3) {
+ if (has_name()) {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_->clear();
+ }
+ }
+ if (has_value()) {
+ if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ value_->clear();
+ }
+ }
+ }
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool LabelPair::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.LabelPair)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string name = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_name()));
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormat::PARSE,
+ "name");
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(18)) goto parse_value;
+ break;
+ }
+
+ // optional string value = 2;
+ case 2: {
+ if (tag == 18) {
+ parse_value:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_value()));
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->value().data(), this->value().length(),
+ ::google::protobuf::internal::WireFormat::PARSE,
+ "value");
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.LabelPair)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.LabelPair)
+ return false;
+#undef DO_
+}
+
+void LabelPair::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.LabelPair)
+ // optional string name = 1;
+ if (has_name()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "name");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->name(), output);
+ }
+
+ // optional string value = 2;
+ if (has_value()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->value().data(), this->value().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "value");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 2, this->value(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.LabelPair)
+}
+
+::google::protobuf::uint8* LabelPair::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.LabelPair)
+ // optional string name = 1;
+ if (has_name()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "name");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->name(), target);
+ }
+
+ // optional string value = 2;
+ if (has_value()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->value().data(), this->value().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "value");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 2, this->value(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.LabelPair)
+ return target;
+}
+
+int LabelPair::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional string name = 1;
+ if (has_name()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->name());
+ }
+
+ // optional string value = 2;
+ if (has_value()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->value());
+ }
+
+ }
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void LabelPair::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const LabelPair* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const LabelPair*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void LabelPair::MergeFrom(const LabelPair& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_name()) {
+ set_name(from.name());
+ }
+ if (from.has_value()) {
+ set_value(from.value());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void LabelPair::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void LabelPair::CopyFrom(const LabelPair& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool LabelPair::IsInitialized() const {
+
+ return true;
+}
+
+void LabelPair::Swap(LabelPair* other) {
+ if (other != this) {
+ std::swap(name_, other->name_);
+ std::swap(value_, other->value_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata LabelPair::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = LabelPair_descriptor_;
+ metadata.reflection = LabelPair_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Gauge::kValueFieldNumber;
+#endif // !_MSC_VER
+
+Gauge::Gauge()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Gauge)
+}
+
+void Gauge::InitAsDefaultInstance() {
+}
+
+Gauge::Gauge(const Gauge& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Gauge)
+}
+
+void Gauge::SharedCtor() {
+ _cached_size_ = 0;
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Gauge::~Gauge() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Gauge)
+ SharedDtor();
+}
+
+void Gauge::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Gauge::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Gauge::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Gauge_descriptor_;
+}
+
+const Gauge& Gauge::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Gauge* Gauge::default_instance_ = NULL;
+
+Gauge* Gauge::New() const {
+ return new Gauge;
+}
+
+void Gauge::Clear() {
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Gauge::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Gauge)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional double value = 1;
+ case 1: {
+ if (tag == 9) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &value_)));
+ set_has_value();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Gauge)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Gauge)
+ return false;
+#undef DO_
+}
+
+void Gauge::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Gauge)
+ // optional double value = 1;
+ if (has_value()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->value(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Gauge)
+}
+
+::google::protobuf::uint8* Gauge::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Gauge)
+ // optional double value = 1;
+ if (has_value()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->value(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Gauge)
+ return target;
+}
+
+int Gauge::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional double value = 1;
+ if (has_value()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Gauge::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Gauge* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Gauge*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Gauge::MergeFrom(const Gauge& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_value()) {
+ set_value(from.value());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Gauge::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Gauge::CopyFrom(const Gauge& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Gauge::IsInitialized() const {
+
+ return true;
+}
+
+void Gauge::Swap(Gauge* other) {
+ if (other != this) {
+ std::swap(value_, other->value_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Gauge::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Gauge_descriptor_;
+ metadata.reflection = Gauge_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Counter::kValueFieldNumber;
+#endif // !_MSC_VER
+
+Counter::Counter()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Counter)
+}
+
+void Counter::InitAsDefaultInstance() {
+}
+
+Counter::Counter(const Counter& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Counter)
+}
+
+void Counter::SharedCtor() {
+ _cached_size_ = 0;
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Counter::~Counter() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Counter)
+ SharedDtor();
+}
+
+void Counter::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Counter::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Counter::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Counter_descriptor_;
+}
+
+const Counter& Counter::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Counter* Counter::default_instance_ = NULL;
+
+Counter* Counter::New() const {
+ return new Counter;
+}
+
+void Counter::Clear() {
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Counter::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Counter)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional double value = 1;
+ case 1: {
+ if (tag == 9) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &value_)));
+ set_has_value();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Counter)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Counter)
+ return false;
+#undef DO_
+}
+
+void Counter::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Counter)
+ // optional double value = 1;
+ if (has_value()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->value(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Counter)
+}
+
+::google::protobuf::uint8* Counter::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Counter)
+ // optional double value = 1;
+ if (has_value()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->value(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Counter)
+ return target;
+}
+
+int Counter::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional double value = 1;
+ if (has_value()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Counter::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Counter* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Counter*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Counter::MergeFrom(const Counter& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_value()) {
+ set_value(from.value());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Counter::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Counter::CopyFrom(const Counter& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Counter::IsInitialized() const {
+
+ return true;
+}
+
+void Counter::Swap(Counter* other) {
+ if (other != this) {
+ std::swap(value_, other->value_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Counter::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Counter_descriptor_;
+ metadata.reflection = Counter_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Quantile::kQuantileFieldNumber;
+const int Quantile::kValueFieldNumber;
+#endif // !_MSC_VER
+
+Quantile::Quantile()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Quantile)
+}
+
+void Quantile::InitAsDefaultInstance() {
+}
+
+Quantile::Quantile(const Quantile& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Quantile)
+}
+
+void Quantile::SharedCtor() {
+ _cached_size_ = 0;
+ quantile_ = 0;
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Quantile::~Quantile() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Quantile)
+ SharedDtor();
+}
+
+void Quantile::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Quantile::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Quantile::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Quantile_descriptor_;
+}
+
+const Quantile& Quantile::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Quantile* Quantile::default_instance_ = NULL;
+
+Quantile* Quantile::New() const {
+ return new Quantile;
+}
+
+void Quantile::Clear() {
+#define OFFSET_OF_FIELD_(f) (reinterpret_cast<char*>( \
+ &reinterpret_cast<Quantile*>(16)->f) - \
+ reinterpret_cast<char*>(16))
+
+#define ZR_(first, last) do { \
+ size_t f = OFFSET_OF_FIELD_(first); \
+ size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \
+ ::memset(&first, 0, n); \
+ } while (0)
+
+ ZR_(quantile_, value_);
+
+#undef OFFSET_OF_FIELD_
+#undef ZR_
+
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Quantile::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Quantile)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional double quantile = 1;
+ case 1: {
+ if (tag == 9) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &quantile_)));
+ set_has_quantile();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(17)) goto parse_value;
+ break;
+ }
+
+ // optional double value = 2;
+ case 2: {
+ if (tag == 17) {
+ parse_value:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &value_)));
+ set_has_value();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Quantile)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Quantile)
+ return false;
+#undef DO_
+}
+
+void Quantile::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Quantile)
+ // optional double quantile = 1;
+ if (has_quantile()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->quantile(), output);
+ }
+
+ // optional double value = 2;
+ if (has_value()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->value(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Quantile)
+}
+
+::google::protobuf::uint8* Quantile::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Quantile)
+ // optional double quantile = 1;
+ if (has_quantile()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->quantile(), target);
+ }
+
+ // optional double value = 2;
+ if (has_value()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->value(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Quantile)
+ return target;
+}
+
+int Quantile::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional double quantile = 1;
+ if (has_quantile()) {
+ total_size += 1 + 8;
+ }
+
+ // optional double value = 2;
+ if (has_value()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Quantile::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Quantile* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Quantile*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Quantile::MergeFrom(const Quantile& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_quantile()) {
+ set_quantile(from.quantile());
+ }
+ if (from.has_value()) {
+ set_value(from.value());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Quantile::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Quantile::CopyFrom(const Quantile& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Quantile::IsInitialized() const {
+
+ return true;
+}
+
+void Quantile::Swap(Quantile* other) {
+ if (other != this) {
+ std::swap(quantile_, other->quantile_);
+ std::swap(value_, other->value_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Quantile::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Quantile_descriptor_;
+ metadata.reflection = Quantile_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Summary::kSampleCountFieldNumber;
+const int Summary::kSampleSumFieldNumber;
+const int Summary::kQuantileFieldNumber;
+#endif // !_MSC_VER
+
+Summary::Summary()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Summary)
+}
+
+void Summary::InitAsDefaultInstance() {
+}
+
+Summary::Summary(const Summary& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Summary)
+}
+
+void Summary::SharedCtor() {
+ _cached_size_ = 0;
+ sample_count_ = GOOGLE_ULONGLONG(0);
+ sample_sum_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Summary::~Summary() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Summary)
+ SharedDtor();
+}
+
+void Summary::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Summary::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Summary::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Summary_descriptor_;
+}
+
+const Summary& Summary::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Summary* Summary::default_instance_ = NULL;
+
+Summary* Summary::New() const {
+ return new Summary;
+}
+
+void Summary::Clear() {
+#define OFFSET_OF_FIELD_(f) (reinterpret_cast<char*>( \
+ &reinterpret_cast<Summary*>(16)->f) - \
+ reinterpret_cast<char*>(16))
+
+#define ZR_(first, last) do { \
+ size_t f = OFFSET_OF_FIELD_(first); \
+ size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \
+ ::memset(&first, 0, n); \
+ } while (0)
+
+ ZR_(sample_count_, sample_sum_);
+
+#undef OFFSET_OF_FIELD_
+#undef ZR_
+
+ quantile_.Clear();
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Summary::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Summary)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional uint64 sample_count = 1;
+ case 1: {
+ if (tag == 8) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>(
+ input, &sample_count_)));
+ set_has_sample_count();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(17)) goto parse_sample_sum;
+ break;
+ }
+
+ // optional double sample_sum = 2;
+ case 2: {
+ if (tag == 17) {
+ parse_sample_sum:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &sample_sum_)));
+ set_has_sample_sum();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(26)) goto parse_quantile;
+ break;
+ }
+
+ // repeated .io.prometheus.client.Quantile quantile = 3;
+ case 3: {
+ if (tag == 26) {
+ parse_quantile:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, add_quantile()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(26)) goto parse_quantile;
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Summary)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Summary)
+ return false;
+#undef DO_
+}
+
+void Summary::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Summary)
+ // optional uint64 sample_count = 1;
+ if (has_sample_count()) {
+ ::google::protobuf::internal::WireFormatLite::WriteUInt64(1, this->sample_count(), output);
+ }
+
+ // optional double sample_sum = 2;
+ if (has_sample_sum()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->sample_sum(), output);
+ }
+
+ // repeated .io.prometheus.client.Quantile quantile = 3;
+ for (int i = 0; i < this->quantile_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 3, this->quantile(i), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Summary)
+}
+
+::google::protobuf::uint8* Summary::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Summary)
+ // optional uint64 sample_count = 1;
+ if (has_sample_count()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteUInt64ToArray(1, this->sample_count(), target);
+ }
+
+ // optional double sample_sum = 2;
+ if (has_sample_sum()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->sample_sum(), target);
+ }
+
+ // repeated .io.prometheus.client.Quantile quantile = 3;
+ for (int i = 0; i < this->quantile_size(); i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 3, this->quantile(i), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Summary)
+ return target;
+}
+
+int Summary::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional uint64 sample_count = 1;
+ if (has_sample_count()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::UInt64Size(
+ this->sample_count());
+ }
+
+ // optional double sample_sum = 2;
+ if (has_sample_sum()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ // repeated .io.prometheus.client.Quantile quantile = 3;
+ total_size += 1 * this->quantile_size();
+ for (int i = 0; i < this->quantile_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->quantile(i));
+ }
+
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Summary::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Summary* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Summary*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Summary::MergeFrom(const Summary& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ quantile_.MergeFrom(from.quantile_);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_sample_count()) {
+ set_sample_count(from.sample_count());
+ }
+ if (from.has_sample_sum()) {
+ set_sample_sum(from.sample_sum());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Summary::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Summary::CopyFrom(const Summary& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Summary::IsInitialized() const {
+
+ return true;
+}
+
+void Summary::Swap(Summary* other) {
+ if (other != this) {
+ std::swap(sample_count_, other->sample_count_);
+ std::swap(sample_sum_, other->sample_sum_);
+ quantile_.Swap(&other->quantile_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Summary::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Summary_descriptor_;
+ metadata.reflection = Summary_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Untyped::kValueFieldNumber;
+#endif // !_MSC_VER
+
+Untyped::Untyped()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Untyped)
+}
+
+void Untyped::InitAsDefaultInstance() {
+}
+
+Untyped::Untyped(const Untyped& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Untyped)
+}
+
+void Untyped::SharedCtor() {
+ _cached_size_ = 0;
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Untyped::~Untyped() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Untyped)
+ SharedDtor();
+}
+
+void Untyped::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Untyped::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Untyped::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Untyped_descriptor_;
+}
+
+const Untyped& Untyped::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Untyped* Untyped::default_instance_ = NULL;
+
+Untyped* Untyped::New() const {
+ return new Untyped;
+}
+
+void Untyped::Clear() {
+ value_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Untyped::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Untyped)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional double value = 1;
+ case 1: {
+ if (tag == 9) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &value_)));
+ set_has_value();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Untyped)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Untyped)
+ return false;
+#undef DO_
+}
+
+void Untyped::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Untyped)
+ // optional double value = 1;
+ if (has_value()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->value(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Untyped)
+}
+
+::google::protobuf::uint8* Untyped::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Untyped)
+ // optional double value = 1;
+ if (has_value()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->value(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Untyped)
+ return target;
+}
+
+int Untyped::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional double value = 1;
+ if (has_value()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Untyped::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Untyped* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Untyped*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Untyped::MergeFrom(const Untyped& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_value()) {
+ set_value(from.value());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Untyped::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Untyped::CopyFrom(const Untyped& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Untyped::IsInitialized() const {
+
+ return true;
+}
+
+void Untyped::Swap(Untyped* other) {
+ if (other != this) {
+ std::swap(value_, other->value_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Untyped::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Untyped_descriptor_;
+ metadata.reflection = Untyped_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Histogram::kSampleCountFieldNumber;
+const int Histogram::kSampleSumFieldNumber;
+const int Histogram::kBucketFieldNumber;
+#endif // !_MSC_VER
+
+Histogram::Histogram()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Histogram)
+}
+
+void Histogram::InitAsDefaultInstance() {
+}
+
+Histogram::Histogram(const Histogram& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Histogram)
+}
+
+void Histogram::SharedCtor() {
+ _cached_size_ = 0;
+ sample_count_ = GOOGLE_ULONGLONG(0);
+ sample_sum_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Histogram::~Histogram() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Histogram)
+ SharedDtor();
+}
+
+void Histogram::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Histogram::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Histogram::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Histogram_descriptor_;
+}
+
+const Histogram& Histogram::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Histogram* Histogram::default_instance_ = NULL;
+
+Histogram* Histogram::New() const {
+ return new Histogram;
+}
+
+void Histogram::Clear() {
+#define OFFSET_OF_FIELD_(f) (reinterpret_cast<char*>( \
+ &reinterpret_cast<Histogram*>(16)->f) - \
+ reinterpret_cast<char*>(16))
+
+#define ZR_(first, last) do { \
+ size_t f = OFFSET_OF_FIELD_(first); \
+ size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \
+ ::memset(&first, 0, n); \
+ } while (0)
+
+ ZR_(sample_count_, sample_sum_);
+
+#undef OFFSET_OF_FIELD_
+#undef ZR_
+
+ bucket_.Clear();
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Histogram::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Histogram)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional uint64 sample_count = 1;
+ case 1: {
+ if (tag == 8) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>(
+ input, &sample_count_)));
+ set_has_sample_count();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(17)) goto parse_sample_sum;
+ break;
+ }
+
+ // optional double sample_sum = 2;
+ case 2: {
+ if (tag == 17) {
+ parse_sample_sum:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &sample_sum_)));
+ set_has_sample_sum();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(26)) goto parse_bucket;
+ break;
+ }
+
+ // repeated .io.prometheus.client.Bucket bucket = 3;
+ case 3: {
+ if (tag == 26) {
+ parse_bucket:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, add_bucket()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(26)) goto parse_bucket;
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Histogram)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Histogram)
+ return false;
+#undef DO_
+}
+
+void Histogram::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Histogram)
+ // optional uint64 sample_count = 1;
+ if (has_sample_count()) {
+ ::google::protobuf::internal::WireFormatLite::WriteUInt64(1, this->sample_count(), output);
+ }
+
+ // optional double sample_sum = 2;
+ if (has_sample_sum()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->sample_sum(), output);
+ }
+
+ // repeated .io.prometheus.client.Bucket bucket = 3;
+ for (int i = 0; i < this->bucket_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 3, this->bucket(i), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Histogram)
+}
+
+::google::protobuf::uint8* Histogram::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Histogram)
+ // optional uint64 sample_count = 1;
+ if (has_sample_count()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteUInt64ToArray(1, this->sample_count(), target);
+ }
+
+ // optional double sample_sum = 2;
+ if (has_sample_sum()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->sample_sum(), target);
+ }
+
+ // repeated .io.prometheus.client.Bucket bucket = 3;
+ for (int i = 0; i < this->bucket_size(); i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 3, this->bucket(i), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Histogram)
+ return target;
+}
+
+int Histogram::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional uint64 sample_count = 1;
+ if (has_sample_count()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::UInt64Size(
+ this->sample_count());
+ }
+
+ // optional double sample_sum = 2;
+ if (has_sample_sum()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ // repeated .io.prometheus.client.Bucket bucket = 3;
+ total_size += 1 * this->bucket_size();
+ for (int i = 0; i < this->bucket_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->bucket(i));
+ }
+
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Histogram::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Histogram* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Histogram*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Histogram::MergeFrom(const Histogram& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ bucket_.MergeFrom(from.bucket_);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_sample_count()) {
+ set_sample_count(from.sample_count());
+ }
+ if (from.has_sample_sum()) {
+ set_sample_sum(from.sample_sum());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Histogram::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Histogram::CopyFrom(const Histogram& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Histogram::IsInitialized() const {
+
+ return true;
+}
+
+void Histogram::Swap(Histogram* other) {
+ if (other != this) {
+ std::swap(sample_count_, other->sample_count_);
+ std::swap(sample_sum_, other->sample_sum_);
+ bucket_.Swap(&other->bucket_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Histogram::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Histogram_descriptor_;
+ metadata.reflection = Histogram_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Bucket::kCumulativeCountFieldNumber;
+const int Bucket::kUpperBoundFieldNumber;
+#endif // !_MSC_VER
+
+Bucket::Bucket()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Bucket)
+}
+
+void Bucket::InitAsDefaultInstance() {
+}
+
+Bucket::Bucket(const Bucket& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Bucket)
+}
+
+void Bucket::SharedCtor() {
+ _cached_size_ = 0;
+ cumulative_count_ = GOOGLE_ULONGLONG(0);
+ upper_bound_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Bucket::~Bucket() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Bucket)
+ SharedDtor();
+}
+
+void Bucket::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Bucket::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Bucket::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Bucket_descriptor_;
+}
+
+const Bucket& Bucket::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Bucket* Bucket::default_instance_ = NULL;
+
+Bucket* Bucket::New() const {
+ return new Bucket;
+}
+
+void Bucket::Clear() {
+#define OFFSET_OF_FIELD_(f) (reinterpret_cast<char*>( \
+ &reinterpret_cast<Bucket*>(16)->f) - \
+ reinterpret_cast<char*>(16))
+
+#define ZR_(first, last) do { \
+ size_t f = OFFSET_OF_FIELD_(first); \
+ size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \
+ ::memset(&first, 0, n); \
+ } while (0)
+
+ ZR_(cumulative_count_, upper_bound_);
+
+#undef OFFSET_OF_FIELD_
+#undef ZR_
+
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Bucket::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Bucket)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional uint64 cumulative_count = 1;
+ case 1: {
+ if (tag == 8) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>(
+ input, &cumulative_count_)));
+ set_has_cumulative_count();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(17)) goto parse_upper_bound;
+ break;
+ }
+
+ // optional double upper_bound = 2;
+ case 2: {
+ if (tag == 17) {
+ parse_upper_bound:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>(
+ input, &upper_bound_)));
+ set_has_upper_bound();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Bucket)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Bucket)
+ return false;
+#undef DO_
+}
+
+void Bucket::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Bucket)
+ // optional uint64 cumulative_count = 1;
+ if (has_cumulative_count()) {
+ ::google::protobuf::internal::WireFormatLite::WriteUInt64(1, this->cumulative_count(), output);
+ }
+
+ // optional double upper_bound = 2;
+ if (has_upper_bound()) {
+ ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->upper_bound(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Bucket)
+}
+
+::google::protobuf::uint8* Bucket::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Bucket)
+ // optional uint64 cumulative_count = 1;
+ if (has_cumulative_count()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteUInt64ToArray(1, this->cumulative_count(), target);
+ }
+
+ // optional double upper_bound = 2;
+ if (has_upper_bound()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->upper_bound(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Bucket)
+ return target;
+}
+
+int Bucket::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional uint64 cumulative_count = 1;
+ if (has_cumulative_count()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::UInt64Size(
+ this->cumulative_count());
+ }
+
+ // optional double upper_bound = 2;
+ if (has_upper_bound()) {
+ total_size += 1 + 8;
+ }
+
+ }
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Bucket::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Bucket* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Bucket*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Bucket::MergeFrom(const Bucket& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_cumulative_count()) {
+ set_cumulative_count(from.cumulative_count());
+ }
+ if (from.has_upper_bound()) {
+ set_upper_bound(from.upper_bound());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Bucket::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Bucket::CopyFrom(const Bucket& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Bucket::IsInitialized() const {
+
+ return true;
+}
+
+void Bucket::Swap(Bucket* other) {
+ if (other != this) {
+ std::swap(cumulative_count_, other->cumulative_count_);
+ std::swap(upper_bound_, other->upper_bound_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Bucket::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Bucket_descriptor_;
+ metadata.reflection = Bucket_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Metric::kLabelFieldNumber;
+const int Metric::kGaugeFieldNumber;
+const int Metric::kCounterFieldNumber;
+const int Metric::kSummaryFieldNumber;
+const int Metric::kUntypedFieldNumber;
+const int Metric::kHistogramFieldNumber;
+const int Metric::kTimestampMsFieldNumber;
+#endif // !_MSC_VER
+
+Metric::Metric()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.Metric)
+}
+
+void Metric::InitAsDefaultInstance() {
+ gauge_ = const_cast< ::io::prometheus::client::Gauge*>(&::io::prometheus::client::Gauge::default_instance());
+ counter_ = const_cast< ::io::prometheus::client::Counter*>(&::io::prometheus::client::Counter::default_instance());
+ summary_ = const_cast< ::io::prometheus::client::Summary*>(&::io::prometheus::client::Summary::default_instance());
+ untyped_ = const_cast< ::io::prometheus::client::Untyped*>(&::io::prometheus::client::Untyped::default_instance());
+ histogram_ = const_cast< ::io::prometheus::client::Histogram*>(&::io::prometheus::client::Histogram::default_instance());
+}
+
+Metric::Metric(const Metric& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Metric)
+}
+
+void Metric::SharedCtor() {
+ _cached_size_ = 0;
+ gauge_ = NULL;
+ counter_ = NULL;
+ summary_ = NULL;
+ untyped_ = NULL;
+ histogram_ = NULL;
+ timestamp_ms_ = GOOGLE_LONGLONG(0);
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Metric::~Metric() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.Metric)
+ SharedDtor();
+}
+
+void Metric::SharedDtor() {
+ if (this != default_instance_) {
+ delete gauge_;
+ delete counter_;
+ delete summary_;
+ delete untyped_;
+ delete histogram_;
+ }
+}
+
+void Metric::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* Metric::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return Metric_descriptor_;
+}
+
+const Metric& Metric::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+Metric* Metric::default_instance_ = NULL;
+
+Metric* Metric::New() const {
+ return new Metric;
+}
+
+void Metric::Clear() {
+ if (_has_bits_[0 / 32] & 126) {
+ if (has_gauge()) {
+ if (gauge_ != NULL) gauge_->::io::prometheus::client::Gauge::Clear();
+ }
+ if (has_counter()) {
+ if (counter_ != NULL) counter_->::io::prometheus::client::Counter::Clear();
+ }
+ if (has_summary()) {
+ if (summary_ != NULL) summary_->::io::prometheus::client::Summary::Clear();
+ }
+ if (has_untyped()) {
+ if (untyped_ != NULL) untyped_->::io::prometheus::client::Untyped::Clear();
+ }
+ if (has_histogram()) {
+ if (histogram_ != NULL) histogram_->::io::prometheus::client::Histogram::Clear();
+ }
+ timestamp_ms_ = GOOGLE_LONGLONG(0);
+ }
+ label_.Clear();
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool Metric::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.Metric)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // repeated .io.prometheus.client.LabelPair label = 1;
+ case 1: {
+ if (tag == 10) {
+ parse_label:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, add_label()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(10)) goto parse_label;
+ if (input->ExpectTag(18)) goto parse_gauge;
+ break;
+ }
+
+ // optional .io.prometheus.client.Gauge gauge = 2;
+ case 2: {
+ if (tag == 18) {
+ parse_gauge:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_gauge()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(26)) goto parse_counter;
+ break;
+ }
+
+ // optional .io.prometheus.client.Counter counter = 3;
+ case 3: {
+ if (tag == 26) {
+ parse_counter:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_counter()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(34)) goto parse_summary;
+ break;
+ }
+
+ // optional .io.prometheus.client.Summary summary = 4;
+ case 4: {
+ if (tag == 34) {
+ parse_summary:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_summary()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(42)) goto parse_untyped;
+ break;
+ }
+
+ // optional .io.prometheus.client.Untyped untyped = 5;
+ case 5: {
+ if (tag == 42) {
+ parse_untyped:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_untyped()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(48)) goto parse_timestamp_ms;
+ break;
+ }
+
+ // optional int64 timestamp_ms = 6;
+ case 6: {
+ if (tag == 48) {
+ parse_timestamp_ms:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+ input, &timestamp_ms_)));
+ set_has_timestamp_ms();
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(58)) goto parse_histogram;
+ break;
+ }
+
+ // optional .io.prometheus.client.Histogram histogram = 7;
+ case 7: {
+ if (tag == 58) {
+ parse_histogram:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_histogram()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.Metric)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.Metric)
+ return false;
+#undef DO_
+}
+
+void Metric::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.Metric)
+ // repeated .io.prometheus.client.LabelPair label = 1;
+ for (int i = 0; i < this->label_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 1, this->label(i), output);
+ }
+
+ // optional .io.prometheus.client.Gauge gauge = 2;
+ if (has_gauge()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 2, this->gauge(), output);
+ }
+
+ // optional .io.prometheus.client.Counter counter = 3;
+ if (has_counter()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 3, this->counter(), output);
+ }
+
+ // optional .io.prometheus.client.Summary summary = 4;
+ if (has_summary()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 4, this->summary(), output);
+ }
+
+ // optional .io.prometheus.client.Untyped untyped = 5;
+ if (has_untyped()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 5, this->untyped(), output);
+ }
+
+ // optional int64 timestamp_ms = 6;
+ if (has_timestamp_ms()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt64(6, this->timestamp_ms(), output);
+ }
+
+ // optional .io.prometheus.client.Histogram histogram = 7;
+ if (has_histogram()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 7, this->histogram(), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.Metric)
+}
+
+::google::protobuf::uint8* Metric::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Metric)
+ // repeated .io.prometheus.client.LabelPair label = 1;
+ for (int i = 0; i < this->label_size(); i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 1, this->label(i), target);
+ }
+
+ // optional .io.prometheus.client.Gauge gauge = 2;
+ if (has_gauge()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 2, this->gauge(), target);
+ }
+
+ // optional .io.prometheus.client.Counter counter = 3;
+ if (has_counter()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 3, this->counter(), target);
+ }
+
+ // optional .io.prometheus.client.Summary summary = 4;
+ if (has_summary()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 4, this->summary(), target);
+ }
+
+ // optional .io.prometheus.client.Untyped untyped = 5;
+ if (has_untyped()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 5, this->untyped(), target);
+ }
+
+ // optional int64 timestamp_ms = 6;
+ if (has_timestamp_ms()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(6, this->timestamp_ms(), target);
+ }
+
+ // optional .io.prometheus.client.Histogram histogram = 7;
+ if (has_histogram()) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 7, this->histogram(), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Metric)
+ return target;
+}
+
+int Metric::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[1 / 32] & (0xffu << (1 % 32))) {
+ // optional .io.prometheus.client.Gauge gauge = 2;
+ if (has_gauge()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->gauge());
+ }
+
+ // optional .io.prometheus.client.Counter counter = 3;
+ if (has_counter()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->counter());
+ }
+
+ // optional .io.prometheus.client.Summary summary = 4;
+ if (has_summary()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->summary());
+ }
+
+ // optional .io.prometheus.client.Untyped untyped = 5;
+ if (has_untyped()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->untyped());
+ }
+
+ // optional .io.prometheus.client.Histogram histogram = 7;
+ if (has_histogram()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->histogram());
+ }
+
+ // optional int64 timestamp_ms = 6;
+ if (has_timestamp_ms()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int64Size(
+ this->timestamp_ms());
+ }
+
+ }
+ // repeated .io.prometheus.client.LabelPair label = 1;
+ total_size += 1 * this->label_size();
+ for (int i = 0; i < this->label_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->label(i));
+ }
+
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Metric::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const Metric* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const Metric*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void Metric::MergeFrom(const Metric& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ label_.MergeFrom(from.label_);
+ if (from._has_bits_[1 / 32] & (0xffu << (1 % 32))) {
+ if (from.has_gauge()) {
+ mutable_gauge()->::io::prometheus::client::Gauge::MergeFrom(from.gauge());
+ }
+ if (from.has_counter()) {
+ mutable_counter()->::io::prometheus::client::Counter::MergeFrom(from.counter());
+ }
+ if (from.has_summary()) {
+ mutable_summary()->::io::prometheus::client::Summary::MergeFrom(from.summary());
+ }
+ if (from.has_untyped()) {
+ mutable_untyped()->::io::prometheus::client::Untyped::MergeFrom(from.untyped());
+ }
+ if (from.has_histogram()) {
+ mutable_histogram()->::io::prometheus::client::Histogram::MergeFrom(from.histogram());
+ }
+ if (from.has_timestamp_ms()) {
+ set_timestamp_ms(from.timestamp_ms());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void Metric::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void Metric::CopyFrom(const Metric& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Metric::IsInitialized() const {
+
+ return true;
+}
+
+void Metric::Swap(Metric* other) {
+ if (other != this) {
+ label_.Swap(&other->label_);
+ std::swap(gauge_, other->gauge_);
+ std::swap(counter_, other->counter_);
+ std::swap(summary_, other->summary_);
+ std::swap(untyped_, other->untyped_);
+ std::swap(histogram_, other->histogram_);
+ std::swap(timestamp_ms_, other->timestamp_ms_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata Metric::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = Metric_descriptor_;
+ metadata.reflection = Metric_reflection_;
+ return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int MetricFamily::kNameFieldNumber;
+const int MetricFamily::kHelpFieldNumber;
+const int MetricFamily::kTypeFieldNumber;
+const int MetricFamily::kMetricFieldNumber;
+#endif // !_MSC_VER
+
+MetricFamily::MetricFamily()
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ // @@protoc_insertion_point(constructor:io.prometheus.client.MetricFamily)
+}
+
+void MetricFamily::InitAsDefaultInstance() {
+}
+
+MetricFamily::MetricFamily(const MetricFamily& from)
+ : ::google::protobuf::Message() {
+ SharedCtor();
+ MergeFrom(from);
+ // @@protoc_insertion_point(copy_constructor:io.prometheus.client.MetricFamily)
+}
+
+void MetricFamily::SharedCtor() {
+ ::google::protobuf::internal::GetEmptyString();
+ _cached_size_ = 0;
+ name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ help_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ type_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+MetricFamily::~MetricFamily() {
+ // @@protoc_insertion_point(destructor:io.prometheus.client.MetricFamily)
+ SharedDtor();
+}
+
+void MetricFamily::SharedDtor() {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete name_;
+ }
+ if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete help_;
+ }
+ if (this != default_instance_) {
+ }
+}
+
+void MetricFamily::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* MetricFamily::descriptor() {
+ protobuf_AssignDescriptorsOnce();
+ return MetricFamily_descriptor_;
+}
+
+const MetricFamily& MetricFamily::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto();
+ return *default_instance_;
+}
+
+MetricFamily* MetricFamily::default_instance_ = NULL;
+
+MetricFamily* MetricFamily::New() const {
+ return new MetricFamily;
+}
+
+void MetricFamily::Clear() {
+ if (_has_bits_[0 / 32] & 7) {
+ if (has_name()) {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_->clear();
+ }
+ }
+ if (has_help()) {
+ if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ help_->clear();
+ }
+ }
+ type_ = 0;
+ }
+ metric_.Clear();
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+ mutable_unknown_fields()->Clear();
+}
+
+bool MetricFamily::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure
+ ::google::protobuf::uint32 tag;
+ // @@protoc_insertion_point(parse_start:io.prometheus.client.MetricFamily)
+ for (;;) {
+ ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127);
+ tag = p.first;
+ if (!p.second) goto handle_unusual;
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional string name = 1;
+ case 1: {
+ if (tag == 10) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_name()));
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormat::PARSE,
+ "name");
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(18)) goto parse_help;
+ break;
+ }
+
+ // optional string help = 2;
+ case 2: {
+ if (tag == 18) {
+ parse_help:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+ input, this->mutable_help()));
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->help().data(), this->help().length(),
+ ::google::protobuf::internal::WireFormat::PARSE,
+ "help");
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(24)) goto parse_type;
+ break;
+ }
+
+ // optional .io.prometheus.client.MetricType type = 3;
+ case 3: {
+ if (tag == 24) {
+ parse_type:
+ int value;
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+ input, &value)));
+ if (::io::prometheus::client::MetricType_IsValid(value)) {
+ set_type(static_cast< ::io::prometheus::client::MetricType >(value));
+ } else {
+ mutable_unknown_fields()->AddVarint(3, value);
+ }
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(34)) goto parse_metric;
+ break;
+ }
+
+ // repeated .io.prometheus.client.Metric metric = 4;
+ case 4: {
+ if (tag == 34) {
+ parse_metric:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, add_metric()));
+ } else {
+ goto handle_unusual;
+ }
+ if (input->ExpectTag(34)) goto parse_metric;
+ if (input->ExpectAtEnd()) goto success;
+ break;
+ }
+
+ default: {
+ handle_unusual:
+ if (tag == 0 ||
+ ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ goto success;
+ }
+ DO_(::google::protobuf::internal::WireFormat::SkipField(
+ input, tag, mutable_unknown_fields()));
+ break;
+ }
+ }
+ }
+success:
+ // @@protoc_insertion_point(parse_success:io.prometheus.client.MetricFamily)
+ return true;
+failure:
+ // @@protoc_insertion_point(parse_failure:io.prometheus.client.MetricFamily)
+ return false;
+#undef DO_
+}
+
+void MetricFamily::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // @@protoc_insertion_point(serialize_start:io.prometheus.client.MetricFamily)
+ // optional string name = 1;
+ if (has_name()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "name");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 1, this->name(), output);
+ }
+
+ // optional string help = 2;
+ if (has_help()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->help().data(), this->help().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "help");
+ ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased(
+ 2, this->help(), output);
+ }
+
+ // optional .io.prometheus.client.MetricType type = 3;
+ if (has_type()) {
+ ::google::protobuf::internal::WireFormatLite::WriteEnum(
+ 3, this->type(), output);
+ }
+
+ // repeated .io.prometheus.client.Metric metric = 4;
+ for (int i = 0; i < this->metric_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+ 4, this->metric(i), output);
+ }
+
+ if (!unknown_fields().empty()) {
+ ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+ unknown_fields(), output);
+ }
+ // @@protoc_insertion_point(serialize_end:io.prometheus.client.MetricFamily)
+}
+
+::google::protobuf::uint8* MetricFamily::SerializeWithCachedSizesToArray(
+ ::google::protobuf::uint8* target) const {
+ // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.MetricFamily)
+ // optional string name = 1;
+ if (has_name()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->name().data(), this->name().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "name");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 1, this->name(), target);
+ }
+
+ // optional string help = 2;
+ if (has_help()) {
+ ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField(
+ this->help().data(), this->help().length(),
+ ::google::protobuf::internal::WireFormat::SERIALIZE,
+ "help");
+ target =
+ ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+ 2, this->help(), target);
+ }
+
+ // optional .io.prometheus.client.MetricType type = 3;
+ if (has_type()) {
+ target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+ 3, this->type(), target);
+ }
+
+ // repeated .io.prometheus.client.Metric metric = 4;
+ for (int i = 0; i < this->metric_size(); i++) {
+ target = ::google::protobuf::internal::WireFormatLite::
+ WriteMessageNoVirtualToArray(
+ 4, this->metric(i), target);
+ }
+
+ if (!unknown_fields().empty()) {
+ target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+ unknown_fields(), target);
+ }
+ // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.MetricFamily)
+ return target;
+}
+
+int MetricFamily::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional string name = 1;
+ if (has_name()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->name());
+ }
+
+ // optional string help = 2;
+ if (has_help()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::StringSize(
+ this->help());
+ }
+
+ // optional .io.prometheus.client.MetricType type = 3;
+ if (has_type()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::EnumSize(this->type());
+ }
+
+ }
+ // repeated .io.prometheus.client.Metric metric = 4;
+ total_size += 1 * this->metric_size();
+ for (int i = 0; i < this->metric_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->metric(i));
+ }
+
+ if (!unknown_fields().empty()) {
+ total_size +=
+ ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+ unknown_fields());
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void MetricFamily::MergeFrom(const ::google::protobuf::Message& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ const MetricFamily* source =
+ ::google::protobuf::internal::dynamic_cast_if_available<const MetricFamily*>(
+ &from);
+ if (source == NULL) {
+ ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+ } else {
+ MergeFrom(*source);
+ }
+}
+
+void MetricFamily::MergeFrom(const MetricFamily& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ metric_.MergeFrom(from.metric_);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_name()) {
+ set_name(from.name());
+ }
+ if (from.has_help()) {
+ set_help(from.help());
+ }
+ if (from.has_type()) {
+ set_type(from.type());
+ }
+ }
+ mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void MetricFamily::CopyFrom(const ::google::protobuf::Message& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+void MetricFamily::CopyFrom(const MetricFamily& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool MetricFamily::IsInitialized() const {
+
+ return true;
+}
+
+void MetricFamily::Swap(MetricFamily* other) {
+ if (other != this) {
+ std::swap(name_, other->name_);
+ std::swap(help_, other->help_);
+ std::swap(type_, other->type_);
+ metric_.Swap(&other->metric_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ _unknown_fields_.Swap(&other->_unknown_fields_);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::google::protobuf::Metadata MetricFamily::GetMetadata() const {
+ protobuf_AssignDescriptorsOnce();
+ ::google::protobuf::Metadata metadata;
+ metadata.descriptor = MetricFamily_descriptor_;
+ metadata.reflection = MetricFamily_reflection_;
+ return metadata;
+}
+
+
+// @@protoc_insertion_point(namespace_scope)
+
+} // namespace client
+} // namespace prometheus
+} // namespace io
+
+// @@protoc_insertion_point(global_scope)
diff --git a/vendor/github.com/prometheus/client_model/cpp/metrics.pb.h b/vendor/github.com/prometheus/client_model/cpp/metrics.pb.h
new file mode 100644
index 000000000..206ba3704
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/cpp/metrics.pb.h
@@ -0,0 +1,2072 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: metrics.proto
+
+#ifndef PROTOBUF_metrics_2eproto__INCLUDED
+#define PROTOBUF_metrics_2eproto__INCLUDED
+
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+
+#if GOOGLE_PROTOBUF_VERSION < 2006000
+#error This file was generated by a newer version of protoc which is
+#error incompatible with your Protocol Buffer headers. Please update
+#error your headers.
+#endif
+#if 2006001 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
+#error This file was generated by an older version of protoc which is
+#error incompatible with your Protocol Buffer headers. Please
+#error regenerate this file with a newer version of protoc.
+#endif
+
+#include <google/protobuf/generated_message_util.h>
+#include <google/protobuf/message.h>
+#include <google/protobuf/repeated_field.h>
+#include <google/protobuf/extension_set.h>
+#include <google/protobuf/generated_enum_reflection.h>
+#include <google/protobuf/unknown_field_set.h>
+// @@protoc_insertion_point(includes)
+
+namespace io {
+namespace prometheus {
+namespace client {
+
+// Internal implementation detail -- do not call these.
+void protobuf_AddDesc_metrics_2eproto();
+void protobuf_AssignDesc_metrics_2eproto();
+void protobuf_ShutdownFile_metrics_2eproto();
+
+class LabelPair;
+class Gauge;
+class Counter;
+class Quantile;
+class Summary;
+class Untyped;
+class Histogram;
+class Bucket;
+class Metric;
+class MetricFamily;
+
+enum MetricType {
+ COUNTER = 0,
+ GAUGE = 1,
+ SUMMARY = 2,
+ UNTYPED = 3,
+ HISTOGRAM = 4
+};
+bool MetricType_IsValid(int value);
+const MetricType MetricType_MIN = COUNTER;
+const MetricType MetricType_MAX = HISTOGRAM;
+const int MetricType_ARRAYSIZE = MetricType_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* MetricType_descriptor();
+inline const ::std::string& MetricType_Name(MetricType value) {
+ return ::google::protobuf::internal::NameOfEnum(
+ MetricType_descriptor(), value);
+}
+inline bool MetricType_Parse(
+ const ::std::string& name, MetricType* value) {
+ return ::google::protobuf::internal::ParseNamedEnum<MetricType>(
+ MetricType_descriptor(), name, value);
+}
+// ===================================================================
+
+class LabelPair : public ::google::protobuf::Message {
+ public:
+ LabelPair();
+ virtual ~LabelPair();
+
+ LabelPair(const LabelPair& from);
+
+ inline LabelPair& operator=(const LabelPair& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const LabelPair& default_instance();
+
+ void Swap(LabelPair* other);
+
+ // implements Message ----------------------------------------------
+
+ LabelPair* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const LabelPair& from);
+ void MergeFrom(const LabelPair& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional string name = 1;
+ inline bool has_name() const;
+ inline void clear_name();
+ static const int kNameFieldNumber = 1;
+ inline const ::std::string& name() const;
+ inline void set_name(const ::std::string& value);
+ inline void set_name(const char* value);
+ inline void set_name(const char* value, size_t size);
+ inline ::std::string* mutable_name();
+ inline ::std::string* release_name();
+ inline void set_allocated_name(::std::string* name);
+
+ // optional string value = 2;
+ inline bool has_value() const;
+ inline void clear_value();
+ static const int kValueFieldNumber = 2;
+ inline const ::std::string& value() const;
+ inline void set_value(const ::std::string& value);
+ inline void set_value(const char* value);
+ inline void set_value(const char* value, size_t size);
+ inline ::std::string* mutable_value();
+ inline ::std::string* release_value();
+ inline void set_allocated_value(::std::string* value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
+ private:
+ inline void set_has_name();
+ inline void clear_has_name();
+ inline void set_has_value();
+ inline void clear_has_value();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ ::std::string* name_;
+ ::std::string* value_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static LabelPair* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class Gauge : public ::google::protobuf::Message {
+ public:
+ Gauge();
+ virtual ~Gauge();
+
+ Gauge(const Gauge& from);
+
+ inline Gauge& operator=(const Gauge& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Gauge& default_instance();
+
+ void Swap(Gauge* other);
+
+ // implements Message ----------------------------------------------
+
+ Gauge* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Gauge& from);
+ void MergeFrom(const Gauge& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional double value = 1;
+ inline bool has_value() const;
+ inline void clear_value();
+ static const int kValueFieldNumber = 1;
+ inline double value() const;
+ inline void set_value(double value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
+ private:
+ inline void set_has_value();
+ inline void clear_has_value();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ double value_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Gauge* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class Counter : public ::google::protobuf::Message {
+ public:
+ Counter();
+ virtual ~Counter();
+
+ Counter(const Counter& from);
+
+ inline Counter& operator=(const Counter& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Counter& default_instance();
+
+ void Swap(Counter* other);
+
+ // implements Message ----------------------------------------------
+
+ Counter* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Counter& from);
+ void MergeFrom(const Counter& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional double value = 1;
+ inline bool has_value() const;
+ inline void clear_value();
+ static const int kValueFieldNumber = 1;
+ inline double value() const;
+ inline void set_value(double value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
+ private:
+ inline void set_has_value();
+ inline void clear_has_value();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ double value_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Counter* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.Quantile (one quantile/value pair of a Summary).
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class Quantile : public ::google::protobuf::Message {
+ public:
+ Quantile();
+ virtual ~Quantile();
+
+ Quantile(const Quantile& from);
+
+ inline Quantile& operator=(const Quantile& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Quantile& default_instance();
+
+ void Swap(Quantile* other);
+
+ // implements Message ----------------------------------------------
+
+ Quantile* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Quantile& from);
+ void MergeFrom(const Quantile& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional double quantile = 1;
+ inline bool has_quantile() const;
+ inline void clear_quantile();
+ static const int kQuantileFieldNumber = 1;
+ inline double quantile() const;
+ inline void set_quantile(double value);
+
+ // optional double value = 2;
+ inline bool has_value() const;
+ inline void clear_value();
+ static const int kValueFieldNumber = 2;
+ inline double value() const;
+ inline void set_value(double value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
+ private:
+ inline void set_has_quantile();
+ inline void clear_has_quantile();
+ inline void set_has_value();
+ inline void clear_has_value();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ // Presence bitmask for the two optional fields (bit 0 = quantile, bit 1 = value).
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ double quantile_;
+ double value_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Quantile* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.Summary (sample_count, sample_sum, repeated Quantile).
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class Summary : public ::google::protobuf::Message {
+ public:
+ Summary();
+ virtual ~Summary();
+
+ Summary(const Summary& from);
+
+ inline Summary& operator=(const Summary& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Summary& default_instance();
+
+ void Swap(Summary* other);
+
+ // implements Message ----------------------------------------------
+
+ Summary* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Summary& from);
+ void MergeFrom(const Summary& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional uint64 sample_count = 1;
+ inline bool has_sample_count() const;
+ inline void clear_sample_count();
+ static const int kSampleCountFieldNumber = 1;
+ inline ::google::protobuf::uint64 sample_count() const;
+ inline void set_sample_count(::google::protobuf::uint64 value);
+
+ // optional double sample_sum = 2;
+ inline bool has_sample_sum() const;
+ inline void clear_sample_sum();
+ static const int kSampleSumFieldNumber = 2;
+ inline double sample_sum() const;
+ inline void set_sample_sum(double value);
+
+ // repeated .io.prometheus.client.Quantile quantile = 3;
+ inline int quantile_size() const;
+ inline void clear_quantile();
+ static const int kQuantileFieldNumber = 3;
+ inline const ::io::prometheus::client::Quantile& quantile(int index) const;
+ inline ::io::prometheus::client::Quantile* mutable_quantile(int index);
+ inline ::io::prometheus::client::Quantile* add_quantile();
+ inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >&
+ quantile() const;
+ inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >*
+ mutable_quantile();
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
+ private:
+ inline void set_has_sample_count();
+ inline void clear_has_sample_count();
+ inline void set_has_sample_sum();
+ inline void clear_has_sample_sum();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ // Presence bits cover only the optional scalar fields; the repeated
+ // quantile field tracks its own size.
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ ::google::protobuf::uint64 sample_count_;
+ double sample_sum_;
+ ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile > quantile_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Summary* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.Untyped (single optional double value).
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class Untyped : public ::google::protobuf::Message {
+ public:
+ Untyped();
+ virtual ~Untyped();
+
+ Untyped(const Untyped& from);
+
+ inline Untyped& operator=(const Untyped& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Untyped& default_instance();
+
+ void Swap(Untyped* other);
+
+ // implements Message ----------------------------------------------
+
+ Untyped* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Untyped& from);
+ void MergeFrom(const Untyped& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional double value = 1;
+ inline bool has_value() const;
+ inline void clear_value();
+ static const int kValueFieldNumber = 1;
+ inline double value() const;
+ inline void set_value(double value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
+ private:
+ inline void set_has_value();
+ inline void clear_has_value();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ double value_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Untyped* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.Histogram (sample_count, sample_sum, repeated Bucket).
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class Histogram : public ::google::protobuf::Message {
+ public:
+ Histogram();
+ virtual ~Histogram();
+
+ Histogram(const Histogram& from);
+
+ inline Histogram& operator=(const Histogram& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Histogram& default_instance();
+
+ void Swap(Histogram* other);
+
+ // implements Message ----------------------------------------------
+
+ Histogram* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Histogram& from);
+ void MergeFrom(const Histogram& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional uint64 sample_count = 1;
+ inline bool has_sample_count() const;
+ inline void clear_sample_count();
+ static const int kSampleCountFieldNumber = 1;
+ inline ::google::protobuf::uint64 sample_count() const;
+ inline void set_sample_count(::google::protobuf::uint64 value);
+
+ // optional double sample_sum = 2;
+ inline bool has_sample_sum() const;
+ inline void clear_sample_sum();
+ static const int kSampleSumFieldNumber = 2;
+ inline double sample_sum() const;
+ inline void set_sample_sum(double value);
+
+ // repeated .io.prometheus.client.Bucket bucket = 3;
+ inline int bucket_size() const;
+ inline void clear_bucket();
+ static const int kBucketFieldNumber = 3;
+ inline const ::io::prometheus::client::Bucket& bucket(int index) const;
+ inline ::io::prometheus::client::Bucket* mutable_bucket(int index);
+ inline ::io::prometheus::client::Bucket* add_bucket();
+ inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >&
+ bucket() const;
+ inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >*
+ mutable_bucket();
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
+ private:
+ inline void set_has_sample_count();
+ inline void clear_has_sample_count();
+ inline void set_has_sample_sum();
+ inline void clear_has_sample_sum();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ ::google::protobuf::uint64 sample_count_;
+ double sample_sum_;
+ ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket > bucket_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Histogram* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.Bucket (cumulative_count up to upper_bound).
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class Bucket : public ::google::protobuf::Message {
+ public:
+ Bucket();
+ virtual ~Bucket();
+
+ Bucket(const Bucket& from);
+
+ inline Bucket& operator=(const Bucket& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Bucket& default_instance();
+
+ void Swap(Bucket* other);
+
+ // implements Message ----------------------------------------------
+
+ Bucket* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Bucket& from);
+ void MergeFrom(const Bucket& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional uint64 cumulative_count = 1;
+ inline bool has_cumulative_count() const;
+ inline void clear_cumulative_count();
+ static const int kCumulativeCountFieldNumber = 1;
+ inline ::google::protobuf::uint64 cumulative_count() const;
+ inline void set_cumulative_count(::google::protobuf::uint64 value);
+
+ // optional double upper_bound = 2;
+ inline bool has_upper_bound() const;
+ inline void clear_upper_bound();
+ static const int kUpperBoundFieldNumber = 2;
+ inline double upper_bound() const;
+ inline void set_upper_bound(double value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
+ private:
+ inline void set_has_cumulative_count();
+ inline void clear_has_cumulative_count();
+ inline void set_has_upper_bound();
+ inline void clear_has_upper_bound();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ ::google::protobuf::uint64 cumulative_count_;
+ double upper_bound_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Bucket* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.Metric — one sample series: a label set plus exactly
+// one of gauge/counter/summary/untyped/histogram (modeled as optional
+// submessage pointers here, not a proto oneof) and an optional timestamp.
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class Metric : public ::google::protobuf::Message {
+ public:
+ Metric();
+ virtual ~Metric();
+
+ Metric(const Metric& from);
+
+ inline Metric& operator=(const Metric& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const Metric& default_instance();
+
+ void Swap(Metric* other);
+
+ // implements Message ----------------------------------------------
+
+ Metric* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const Metric& from);
+ void MergeFrom(const Metric& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // repeated .io.prometheus.client.LabelPair label = 1;
+ inline int label_size() const;
+ inline void clear_label();
+ static const int kLabelFieldNumber = 1;
+ inline const ::io::prometheus::client::LabelPair& label(int index) const;
+ inline ::io::prometheus::client::LabelPair* mutable_label(int index);
+ inline ::io::prometheus::client::LabelPair* add_label();
+ inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >&
+ label() const;
+ inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >*
+ mutable_label();
+
+ // optional .io.prometheus.client.Gauge gauge = 2;
+ inline bool has_gauge() const;
+ inline void clear_gauge();
+ static const int kGaugeFieldNumber = 2;
+ inline const ::io::prometheus::client::Gauge& gauge() const;
+ inline ::io::prometheus::client::Gauge* mutable_gauge();
+ inline ::io::prometheus::client::Gauge* release_gauge();
+ inline void set_allocated_gauge(::io::prometheus::client::Gauge* gauge);
+
+ // optional .io.prometheus.client.Counter counter = 3;
+ inline bool has_counter() const;
+ inline void clear_counter();
+ static const int kCounterFieldNumber = 3;
+ inline const ::io::prometheus::client::Counter& counter() const;
+ inline ::io::prometheus::client::Counter* mutable_counter();
+ inline ::io::prometheus::client::Counter* release_counter();
+ inline void set_allocated_counter(::io::prometheus::client::Counter* counter);
+
+ // optional .io.prometheus.client.Summary summary = 4;
+ inline bool has_summary() const;
+ inline void clear_summary();
+ static const int kSummaryFieldNumber = 4;
+ inline const ::io::prometheus::client::Summary& summary() const;
+ inline ::io::prometheus::client::Summary* mutable_summary();
+ inline ::io::prometheus::client::Summary* release_summary();
+ inline void set_allocated_summary(::io::prometheus::client::Summary* summary);
+
+ // optional .io.prometheus.client.Untyped untyped = 5;
+ inline bool has_untyped() const;
+ inline void clear_untyped();
+ static const int kUntypedFieldNumber = 5;
+ inline const ::io::prometheus::client::Untyped& untyped() const;
+ inline ::io::prometheus::client::Untyped* mutable_untyped();
+ inline ::io::prometheus::client::Untyped* release_untyped();
+ inline void set_allocated_untyped(::io::prometheus::client::Untyped* untyped);
+
+ // optional .io.prometheus.client.Histogram histogram = 7;
+ inline bool has_histogram() const;
+ inline void clear_histogram();
+ static const int kHistogramFieldNumber = 7;
+ inline const ::io::prometheus::client::Histogram& histogram() const;
+ inline ::io::prometheus::client::Histogram* mutable_histogram();
+ inline ::io::prometheus::client::Histogram* release_histogram();
+ inline void set_allocated_histogram(::io::prometheus::client::Histogram* histogram);
+
+ // optional int64 timestamp_ms = 6;
+ inline bool has_timestamp_ms() const;
+ inline void clear_timestamp_ms();
+ static const int kTimestampMsFieldNumber = 6;
+ inline ::google::protobuf::int64 timestamp_ms() const;
+ inline void set_timestamp_ms(::google::protobuf::int64 value);
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
+ private:
+ inline void set_has_gauge();
+ inline void clear_has_gauge();
+ inline void set_has_counter();
+ inline void clear_has_counter();
+ inline void set_has_summary();
+ inline void clear_has_summary();
+ inline void set_has_untyped();
+ inline void clear_has_untyped();
+ inline void set_has_histogram();
+ inline void clear_has_histogram();
+ inline void set_has_timestamp_ms();
+ inline void clear_has_timestamp_ms();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair > label_;
+ ::io::prometheus::client::Gauge* gauge_;
+ ::io::prometheus::client::Counter* counter_;
+ ::io::prometheus::client::Summary* summary_;
+ ::io::prometheus::client::Untyped* untyped_;
+ ::io::prometheus::client::Histogram* histogram_;
+ ::google::protobuf::int64 timestamp_ms_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static Metric* default_instance_;
+};
+// -------------------------------------------------------------------
+
+// NOTE(review): protoc-generated message class for
+// io.prometheus.client.MetricFamily — the top-level exposition unit:
+// name, help text, MetricType, and the repeated Metric samples.
+// Vendored generated code — do not hand-edit; regenerate from metrics.proto.
+class MetricFamily : public ::google::protobuf::Message {
+ public:
+ MetricFamily();
+ virtual ~MetricFamily();
+
+ MetricFamily(const MetricFamily& from);
+
+ inline MetricFamily& operator=(const MetricFamily& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+ return _unknown_fields_;
+ }
+
+ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+ return &_unknown_fields_;
+ }
+
+ static const ::google::protobuf::Descriptor* descriptor();
+ static const MetricFamily& default_instance();
+
+ void Swap(MetricFamily* other);
+
+ // implements Message ----------------------------------------------
+
+ MetricFamily* New() const;
+ void CopyFrom(const ::google::protobuf::Message& from);
+ void MergeFrom(const ::google::protobuf::Message& from);
+ void CopyFrom(const MetricFamily& from);
+ void MergeFrom(const MetricFamily& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+ ::google::protobuf::Metadata GetMetadata() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional string name = 1;
+ inline bool has_name() const;
+ inline void clear_name();
+ static const int kNameFieldNumber = 1;
+ inline const ::std::string& name() const;
+ inline void set_name(const ::std::string& value);
+ inline void set_name(const char* value);
+ inline void set_name(const char* value, size_t size);
+ inline ::std::string* mutable_name();
+ inline ::std::string* release_name();
+ inline void set_allocated_name(::std::string* name);
+
+ // optional string help = 2;
+ inline bool has_help() const;
+ inline void clear_help();
+ static const int kHelpFieldNumber = 2;
+ inline const ::std::string& help() const;
+ inline void set_help(const ::std::string& value);
+ inline void set_help(const char* value);
+ inline void set_help(const char* value, size_t size);
+ inline ::std::string* mutable_help();
+ inline ::std::string* release_help();
+ inline void set_allocated_help(::std::string* help);
+
+ // optional .io.prometheus.client.MetricType type = 3;
+ inline bool has_type() const;
+ inline void clear_type();
+ static const int kTypeFieldNumber = 3;
+ inline ::io::prometheus::client::MetricType type() const;
+ inline void set_type(::io::prometheus::client::MetricType value);
+
+ // repeated .io.prometheus.client.Metric metric = 4;
+ inline int metric_size() const;
+ inline void clear_metric();
+ static const int kMetricFieldNumber = 4;
+ inline const ::io::prometheus::client::Metric& metric(int index) const;
+ inline ::io::prometheus::client::Metric* mutable_metric(int index);
+ inline ::io::prometheus::client::Metric* add_metric();
+ inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >&
+ metric() const;
+ inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >*
+ mutable_metric();
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
+ private:
+ inline void set_has_name();
+ inline void clear_has_name();
+ inline void set_has_help();
+ inline void clear_has_help();
+ inline void set_has_type();
+ inline void clear_has_type();
+
+ ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+ ::google::protobuf::uint32 _has_bits_[1];
+ mutable int _cached_size_;
+ // String fields are pointers so unset fields can share protobuf's
+ // global empty string instead of owning storage.
+ ::std::string* name_;
+ ::std::string* help_;
+ ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric > metric_;
+ int type_;
+ friend void protobuf_AddDesc_metrics_2eproto();
+ friend void protobuf_AssignDesc_metrics_2eproto();
+ friend void protobuf_ShutdownFile_metrics_2eproto();
+
+ void InitAsDefaultInstance();
+ static MetricFamily* default_instance_;
+};
+// ===================================================================
+
+
+// ===================================================================
+
+// LabelPair
+
+// NOTE(review): generated inline accessors for LabelPair. An unset string
+// field points at protobuf's shared empty-string singleton, which is why
+// every accessor compares the pointer against GetEmptyStringAlreadyInited()
+// before clearing, mutating, or deleting the string.
+// optional string name = 1;
+inline bool LabelPair::has_name() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void LabelPair::set_has_name() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void LabelPair::clear_has_name() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void LabelPair::clear_name() {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_->clear();
+ }
+ clear_has_name();
+}
+inline const ::std::string& LabelPair::name() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.LabelPair.name)
+ return *name_;
+}
+inline void LabelPair::set_name(const ::std::string& value) {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ name_->assign(value);
+ // @@protoc_insertion_point(field_set:io.prometheus.client.LabelPair.name)
+}
+inline void LabelPair::set_name(const char* value) {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ name_->assign(value);
+ // @@protoc_insertion_point(field_set_char:io.prometheus.client.LabelPair.name)
+}
+inline void LabelPair::set_name(const char* value, size_t size) {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ name_->assign(reinterpret_cast<const char*>(value), size);
+ // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.LabelPair.name)
+}
+inline ::std::string* LabelPair::mutable_name() {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.LabelPair.name)
+ return name_;
+}
+inline ::std::string* LabelPair::release_name() {
+ clear_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ return NULL;
+ } else {
+ // Hand ownership of the heap string to the caller and fall back to
+ // the shared empty-string sentinel.
+ ::std::string* temp = name_;
+ name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ return temp;
+ }
+}
+inline void LabelPair::set_allocated_name(::std::string* name) {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete name_;
+ }
+ if (name) {
+ set_has_name();
+ name_ = name;
+ } else {
+ clear_has_name();
+ name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.LabelPair.name)
+}
+
+// optional string value = 2;
+inline bool LabelPair::has_value() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void LabelPair::set_has_value() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void LabelPair::clear_has_value() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void LabelPair::clear_value() {
+ if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ value_->clear();
+ }
+ clear_has_value();
+}
+inline const ::std::string& LabelPair::value() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.LabelPair.value)
+ return *value_;
+}
+inline void LabelPair::set_value(const ::std::string& value) {
+ set_has_value();
+ if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ value_ = new ::std::string;
+ }
+ value_->assign(value);
+ // @@protoc_insertion_point(field_set:io.prometheus.client.LabelPair.value)
+}
+inline void LabelPair::set_value(const char* value) {
+ set_has_value();
+ if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ value_ = new ::std::string;
+ }
+ value_->assign(value);
+ // @@protoc_insertion_point(field_set_char:io.prometheus.client.LabelPair.value)
+}
+inline void LabelPair::set_value(const char* value, size_t size) {
+ set_has_value();
+ if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ value_ = new ::std::string;
+ }
+ value_->assign(reinterpret_cast<const char*>(value), size);
+ // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.LabelPair.value)
+}
+inline ::std::string* LabelPair::mutable_value() {
+ set_has_value();
+ if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ value_ = new ::std::string;
+ }
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.LabelPair.value)
+ return value_;
+}
+inline ::std::string* LabelPair::release_value() {
+ clear_has_value();
+ if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ return NULL;
+ } else {
+ ::std::string* temp = value_;
+ value_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ return temp;
+ }
+}
+inline void LabelPair::set_allocated_value(::std::string* value) {
+ if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete value_;
+ }
+ if (value) {
+ set_has_value();
+ value_ = value;
+ } else {
+ clear_has_value();
+ value_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.LabelPair.value)
+}
+
+// -------------------------------------------------------------------
+
+// Gauge
+
+// NOTE(review): generated inline accessors for Gauge's single optional
+// double field; presence is tracked in bit 0 of _has_bits_.
+// optional double value = 1;
+inline bool Gauge::has_value() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Gauge::set_has_value() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Gauge::clear_has_value() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Gauge::clear_value() {
+ value_ = 0;
+ clear_has_value();
+}
+inline double Gauge::value() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Gauge.value)
+ return value_;
+}
+inline void Gauge::set_value(double value) {
+ set_has_value();
+ value_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Gauge.value)
+}
+
+// -------------------------------------------------------------------
+
+// Counter
+
+// NOTE(review): generated inline accessors for Counter's single optional
+// double field; structurally identical to the Gauge accessors.
+// optional double value = 1;
+inline bool Counter::has_value() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Counter::set_has_value() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Counter::clear_has_value() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Counter::clear_value() {
+ value_ = 0;
+ clear_has_value();
+}
+inline double Counter::value() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Counter.value)
+ return value_;
+}
+inline void Counter::set_value(double value) {
+ set_has_value();
+ value_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Counter.value)
+}
+
+// -------------------------------------------------------------------
+
+// Quantile
+
+// NOTE(review): generated inline accessors for Quantile — bit 0 of
+// _has_bits_ tracks the quantile field, bit 1 tracks value.
+// optional double quantile = 1;
+inline bool Quantile::has_quantile() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Quantile::set_has_quantile() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Quantile::clear_has_quantile() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Quantile::clear_quantile() {
+ quantile_ = 0;
+ clear_has_quantile();
+}
+inline double Quantile::quantile() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Quantile.quantile)
+ return quantile_;
+}
+inline void Quantile::set_quantile(double value) {
+ set_has_quantile();
+ quantile_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Quantile.quantile)
+}
+
+// optional double value = 2;
+inline bool Quantile::has_value() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Quantile::set_has_value() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Quantile::clear_has_value() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Quantile::clear_value() {
+ value_ = 0;
+ clear_has_value();
+}
+inline double Quantile::value() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Quantile.value)
+ return value_;
+}
+inline void Quantile::set_value(double value) {
+ set_has_value();
+ value_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Quantile.value)
+}
+
+// -------------------------------------------------------------------
+
+// Summary
+
+// NOTE(review): generated inline accessors for Summary's scalar fields and
+// its repeated quantile field (which delegates to RepeatedPtrField).
+// optional uint64 sample_count = 1;
+inline bool Summary::has_sample_count() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Summary::set_has_sample_count() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Summary::clear_has_sample_count() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Summary::clear_sample_count() {
+ sample_count_ = GOOGLE_ULONGLONG(0);
+ clear_has_sample_count();
+}
+inline ::google::protobuf::uint64 Summary::sample_count() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Summary.sample_count)
+ return sample_count_;
+}
+inline void Summary::set_sample_count(::google::protobuf::uint64 value) {
+ set_has_sample_count();
+ sample_count_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Summary.sample_count)
+}
+
+// optional double sample_sum = 2;
+inline bool Summary::has_sample_sum() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Summary::set_has_sample_sum() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Summary::clear_has_sample_sum() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Summary::clear_sample_sum() {
+ sample_sum_ = 0;
+ clear_has_sample_sum();
+}
+inline double Summary::sample_sum() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Summary.sample_sum)
+ return sample_sum_;
+}
+inline void Summary::set_sample_sum(double value) {
+ set_has_sample_sum();
+ sample_sum_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Summary.sample_sum)
+}
+
+// repeated .io.prometheus.client.Quantile quantile = 3;
+inline int Summary::quantile_size() const {
+ return quantile_.size();
+}
+inline void Summary::clear_quantile() {
+ quantile_.Clear();
+}
+inline const ::io::prometheus::client::Quantile& Summary::quantile(int index) const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Summary.quantile)
+ return quantile_.Get(index);
+}
+inline ::io::prometheus::client::Quantile* Summary::mutable_quantile(int index) {
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Summary.quantile)
+ return quantile_.Mutable(index);
+}
+inline ::io::prometheus::client::Quantile* Summary::add_quantile() {
+ // @@protoc_insertion_point(field_add:io.prometheus.client.Summary.quantile)
+ return quantile_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >&
+Summary::quantile() const {
+ // @@protoc_insertion_point(field_list:io.prometheus.client.Summary.quantile)
+ return quantile_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >*
+Summary::mutable_quantile() {
+ // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.Summary.quantile)
+ return &quantile_;
+}
+
+// -------------------------------------------------------------------
+
+// Untyped
+
+// optional double value = 1;
+inline bool Untyped::has_value() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Untyped::set_has_value() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Untyped::clear_has_value() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Untyped::clear_value() {
+ value_ = 0;
+ clear_has_value();
+}
+inline double Untyped::value() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Untyped.value)
+ return value_;
+}
+inline void Untyped::set_value(double value) {
+ set_has_value();
+ value_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Untyped.value)
+}
+
+// -------------------------------------------------------------------
+
+// Histogram
+
+// optional uint64 sample_count = 1;
+inline bool Histogram::has_sample_count() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Histogram::set_has_sample_count() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Histogram::clear_has_sample_count() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Histogram::clear_sample_count() {
+ sample_count_ = GOOGLE_ULONGLONG(0);
+ clear_has_sample_count();
+}
+inline ::google::protobuf::uint64 Histogram::sample_count() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Histogram.sample_count)
+ return sample_count_;
+}
+inline void Histogram::set_sample_count(::google::protobuf::uint64 value) {
+ set_has_sample_count();
+ sample_count_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Histogram.sample_count)
+}
+
+// optional double sample_sum = 2;
+inline bool Histogram::has_sample_sum() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Histogram::set_has_sample_sum() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Histogram::clear_has_sample_sum() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Histogram::clear_sample_sum() {
+ sample_sum_ = 0;
+ clear_has_sample_sum();
+}
+inline double Histogram::sample_sum() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Histogram.sample_sum)
+ return sample_sum_;
+}
+inline void Histogram::set_sample_sum(double value) {
+ set_has_sample_sum();
+ sample_sum_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Histogram.sample_sum)
+}
+
+// repeated .io.prometheus.client.Bucket bucket = 3;
+inline int Histogram::bucket_size() const {
+ return bucket_.size();
+}
+inline void Histogram::clear_bucket() {
+ bucket_.Clear();
+}
+inline const ::io::prometheus::client::Bucket& Histogram::bucket(int index) const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Histogram.bucket)
+ return bucket_.Get(index);
+}
+inline ::io::prometheus::client::Bucket* Histogram::mutable_bucket(int index) {
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Histogram.bucket)
+ return bucket_.Mutable(index);
+}
+inline ::io::prometheus::client::Bucket* Histogram::add_bucket() {
+ // @@protoc_insertion_point(field_add:io.prometheus.client.Histogram.bucket)
+ return bucket_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >&
+Histogram::bucket() const {
+ // @@protoc_insertion_point(field_list:io.prometheus.client.Histogram.bucket)
+ return bucket_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >*
+Histogram::mutable_bucket() {
+ // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.Histogram.bucket)
+ return &bucket_;
+}
+
+// -------------------------------------------------------------------
+
+// Bucket
+
+// optional uint64 cumulative_count = 1;
+inline bool Bucket::has_cumulative_count() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Bucket::set_has_cumulative_count() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Bucket::clear_has_cumulative_count() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Bucket::clear_cumulative_count() {
+ cumulative_count_ = GOOGLE_ULONGLONG(0);
+ clear_has_cumulative_count();
+}
+inline ::google::protobuf::uint64 Bucket::cumulative_count() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Bucket.cumulative_count)
+ return cumulative_count_;
+}
+inline void Bucket::set_cumulative_count(::google::protobuf::uint64 value) {
+ set_has_cumulative_count();
+ cumulative_count_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Bucket.cumulative_count)
+}
+
+// optional double upper_bound = 2;
+inline bool Bucket::has_upper_bound() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Bucket::set_has_upper_bound() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Bucket::clear_has_upper_bound() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Bucket::clear_upper_bound() {
+ upper_bound_ = 0;
+ clear_has_upper_bound();
+}
+inline double Bucket::upper_bound() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Bucket.upper_bound)
+ return upper_bound_;
+}
+inline void Bucket::set_upper_bound(double value) {
+ set_has_upper_bound();
+ upper_bound_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Bucket.upper_bound)
+}
+
+// -------------------------------------------------------------------
+
+// Metric
+
+// repeated .io.prometheus.client.LabelPair label = 1;
+inline int Metric::label_size() const {
+ return label_.size();
+}
+inline void Metric::clear_label() {
+ label_.Clear();
+}
+inline const ::io::prometheus::client::LabelPair& Metric::label(int index) const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.label)
+ return label_.Get(index);
+}
+inline ::io::prometheus::client::LabelPair* Metric::mutable_label(int index) {
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.label)
+ return label_.Mutable(index);
+}
+inline ::io::prometheus::client::LabelPair* Metric::add_label() {
+ // @@protoc_insertion_point(field_add:io.prometheus.client.Metric.label)
+ return label_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >&
+Metric::label() const {
+ // @@protoc_insertion_point(field_list:io.prometheus.client.Metric.label)
+ return label_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >*
+Metric::mutable_label() {
+ // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.Metric.label)
+ return &label_;
+}
+
+// optional .io.prometheus.client.Gauge gauge = 2;
+inline bool Metric::has_gauge() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Metric::set_has_gauge() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Metric::clear_has_gauge() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Metric::clear_gauge() {
+ if (gauge_ != NULL) gauge_->::io::prometheus::client::Gauge::Clear();
+ clear_has_gauge();
+}
+inline const ::io::prometheus::client::Gauge& Metric::gauge() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.gauge)
+ return gauge_ != NULL ? *gauge_ : *default_instance_->gauge_;
+}
+inline ::io::prometheus::client::Gauge* Metric::mutable_gauge() {
+ set_has_gauge();
+ if (gauge_ == NULL) gauge_ = new ::io::prometheus::client::Gauge;
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.gauge)
+ return gauge_;
+}
+inline ::io::prometheus::client::Gauge* Metric::release_gauge() {
+ clear_has_gauge();
+ ::io::prometheus::client::Gauge* temp = gauge_;
+ gauge_ = NULL;
+ return temp;
+}
+inline void Metric::set_allocated_gauge(::io::prometheus::client::Gauge* gauge) {
+ delete gauge_;
+ gauge_ = gauge;
+ if (gauge) {
+ set_has_gauge();
+ } else {
+ clear_has_gauge();
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.gauge)
+}
+
+// optional .io.prometheus.client.Counter counter = 3;
+inline bool Metric::has_counter() const {
+ return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void Metric::set_has_counter() {
+ _has_bits_[0] |= 0x00000004u;
+}
+inline void Metric::clear_has_counter() {
+ _has_bits_[0] &= ~0x00000004u;
+}
+inline void Metric::clear_counter() {
+ if (counter_ != NULL) counter_->::io::prometheus::client::Counter::Clear();
+ clear_has_counter();
+}
+inline const ::io::prometheus::client::Counter& Metric::counter() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.counter)
+ return counter_ != NULL ? *counter_ : *default_instance_->counter_;
+}
+inline ::io::prometheus::client::Counter* Metric::mutable_counter() {
+ set_has_counter();
+ if (counter_ == NULL) counter_ = new ::io::prometheus::client::Counter;
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.counter)
+ return counter_;
+}
+inline ::io::prometheus::client::Counter* Metric::release_counter() {
+ clear_has_counter();
+ ::io::prometheus::client::Counter* temp = counter_;
+ counter_ = NULL;
+ return temp;
+}
+inline void Metric::set_allocated_counter(::io::prometheus::client::Counter* counter) {
+ delete counter_;
+ counter_ = counter;
+ if (counter) {
+ set_has_counter();
+ } else {
+ clear_has_counter();
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.counter)
+}
+
+// optional .io.prometheus.client.Summary summary = 4;
+inline bool Metric::has_summary() const {
+ return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void Metric::set_has_summary() {
+ _has_bits_[0] |= 0x00000008u;
+}
+inline void Metric::clear_has_summary() {
+ _has_bits_[0] &= ~0x00000008u;
+}
+inline void Metric::clear_summary() {
+ if (summary_ != NULL) summary_->::io::prometheus::client::Summary::Clear();
+ clear_has_summary();
+}
+inline const ::io::prometheus::client::Summary& Metric::summary() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.summary)
+ return summary_ != NULL ? *summary_ : *default_instance_->summary_;
+}
+inline ::io::prometheus::client::Summary* Metric::mutable_summary() {
+ set_has_summary();
+ if (summary_ == NULL) summary_ = new ::io::prometheus::client::Summary;
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.summary)
+ return summary_;
+}
+inline ::io::prometheus::client::Summary* Metric::release_summary() {
+ clear_has_summary();
+ ::io::prometheus::client::Summary* temp = summary_;
+ summary_ = NULL;
+ return temp;
+}
+inline void Metric::set_allocated_summary(::io::prometheus::client::Summary* summary) {
+ delete summary_;
+ summary_ = summary;
+ if (summary) {
+ set_has_summary();
+ } else {
+ clear_has_summary();
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.summary)
+}
+
+// optional .io.prometheus.client.Untyped untyped = 5;
+inline bool Metric::has_untyped() const {
+ return (_has_bits_[0] & 0x00000010u) != 0;
+}
+inline void Metric::set_has_untyped() {
+ _has_bits_[0] |= 0x00000010u;
+}
+inline void Metric::clear_has_untyped() {
+ _has_bits_[0] &= ~0x00000010u;
+}
+inline void Metric::clear_untyped() {
+ if (untyped_ != NULL) untyped_->::io::prometheus::client::Untyped::Clear();
+ clear_has_untyped();
+}
+inline const ::io::prometheus::client::Untyped& Metric::untyped() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.untyped)
+ return untyped_ != NULL ? *untyped_ : *default_instance_->untyped_;
+}
+inline ::io::prometheus::client::Untyped* Metric::mutable_untyped() {
+ set_has_untyped();
+ if (untyped_ == NULL) untyped_ = new ::io::prometheus::client::Untyped;
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.untyped)
+ return untyped_;
+}
+inline ::io::prometheus::client::Untyped* Metric::release_untyped() {
+ clear_has_untyped();
+ ::io::prometheus::client::Untyped* temp = untyped_;
+ untyped_ = NULL;
+ return temp;
+}
+inline void Metric::set_allocated_untyped(::io::prometheus::client::Untyped* untyped) {
+ delete untyped_;
+ untyped_ = untyped;
+ if (untyped) {
+ set_has_untyped();
+ } else {
+ clear_has_untyped();
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.untyped)
+}
+
+// optional .io.prometheus.client.Histogram histogram = 7;
+inline bool Metric::has_histogram() const {
+ return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void Metric::set_has_histogram() {
+ _has_bits_[0] |= 0x00000020u;
+}
+inline void Metric::clear_has_histogram() {
+ _has_bits_[0] &= ~0x00000020u;
+}
+inline void Metric::clear_histogram() {
+ if (histogram_ != NULL) histogram_->::io::prometheus::client::Histogram::Clear();
+ clear_has_histogram();
+}
+inline const ::io::prometheus::client::Histogram& Metric::histogram() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.histogram)
+ return histogram_ != NULL ? *histogram_ : *default_instance_->histogram_;
+}
+inline ::io::prometheus::client::Histogram* Metric::mutable_histogram() {
+ set_has_histogram();
+ if (histogram_ == NULL) histogram_ = new ::io::prometheus::client::Histogram;
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.histogram)
+ return histogram_;
+}
+inline ::io::prometheus::client::Histogram* Metric::release_histogram() {
+ clear_has_histogram();
+ ::io::prometheus::client::Histogram* temp = histogram_;
+ histogram_ = NULL;
+ return temp;
+}
+inline void Metric::set_allocated_histogram(::io::prometheus::client::Histogram* histogram) {
+ delete histogram_;
+ histogram_ = histogram;
+ if (histogram) {
+ set_has_histogram();
+ } else {
+ clear_has_histogram();
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.histogram)
+}
+
+// optional int64 timestamp_ms = 6;
+inline bool Metric::has_timestamp_ms() const {
+ return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void Metric::set_has_timestamp_ms() {
+ _has_bits_[0] |= 0x00000040u;
+}
+inline void Metric::clear_has_timestamp_ms() {
+ _has_bits_[0] &= ~0x00000040u;
+}
+inline void Metric::clear_timestamp_ms() {
+ timestamp_ms_ = GOOGLE_LONGLONG(0);
+ clear_has_timestamp_ms();
+}
+inline ::google::protobuf::int64 Metric::timestamp_ms() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.timestamp_ms)
+ return timestamp_ms_;
+}
+inline void Metric::set_timestamp_ms(::google::protobuf::int64 value) {
+ set_has_timestamp_ms();
+ timestamp_ms_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.Metric.timestamp_ms)
+}
+
+// -------------------------------------------------------------------
+
+// MetricFamily
+
+// optional string name = 1;
+inline bool MetricFamily::has_name() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void MetricFamily::set_has_name() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void MetricFamily::clear_has_name() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void MetricFamily::clear_name() {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_->clear();
+ }
+ clear_has_name();
+}
+inline const ::std::string& MetricFamily::name() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.name)
+ return *name_;
+}
+inline void MetricFamily::set_name(const ::std::string& value) {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ name_->assign(value);
+ // @@protoc_insertion_point(field_set:io.prometheus.client.MetricFamily.name)
+}
+inline void MetricFamily::set_name(const char* value) {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ name_->assign(value);
+ // @@protoc_insertion_point(field_set_char:io.prometheus.client.MetricFamily.name)
+}
+inline void MetricFamily::set_name(const char* value, size_t size) {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ name_->assign(reinterpret_cast<const char*>(value), size);
+ // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.MetricFamily.name)
+}
+inline ::std::string* MetricFamily::mutable_name() {
+ set_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ name_ = new ::std::string;
+ }
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.MetricFamily.name)
+ return name_;
+}
+inline ::std::string* MetricFamily::release_name() {
+ clear_has_name();
+ if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ return NULL;
+ } else {
+ ::std::string* temp = name_;
+ name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ return temp;
+ }
+}
+inline void MetricFamily::set_allocated_name(::std::string* name) {
+ if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete name_;
+ }
+ if (name) {
+ set_has_name();
+ name_ = name;
+ } else {
+ clear_has_name();
+ name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.MetricFamily.name)
+}
+
+// optional string help = 2;
+inline bool MetricFamily::has_help() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void MetricFamily::set_has_help() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void MetricFamily::clear_has_help() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void MetricFamily::clear_help() {
+ if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ help_->clear();
+ }
+ clear_has_help();
+}
+inline const ::std::string& MetricFamily::help() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.help)
+ return *help_;
+}
+inline void MetricFamily::set_help(const ::std::string& value) {
+ set_has_help();
+ if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ help_ = new ::std::string;
+ }
+ help_->assign(value);
+ // @@protoc_insertion_point(field_set:io.prometheus.client.MetricFamily.help)
+}
+inline void MetricFamily::set_help(const char* value) {
+ set_has_help();
+ if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ help_ = new ::std::string;
+ }
+ help_->assign(value);
+ // @@protoc_insertion_point(field_set_char:io.prometheus.client.MetricFamily.help)
+}
+inline void MetricFamily::set_help(const char* value, size_t size) {
+ set_has_help();
+ if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ help_ = new ::std::string;
+ }
+ help_->assign(reinterpret_cast<const char*>(value), size);
+ // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.MetricFamily.help)
+}
+inline ::std::string* MetricFamily::mutable_help() {
+ set_has_help();
+ if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ help_ = new ::std::string;
+ }
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.MetricFamily.help)
+ return help_;
+}
+inline ::std::string* MetricFamily::release_help() {
+ clear_has_help();
+ if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ return NULL;
+ } else {
+ ::std::string* temp = help_;
+ help_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ return temp;
+ }
+}
+inline void MetricFamily::set_allocated_help(::std::string* help) {
+ if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) {
+ delete help_;
+ }
+ if (help) {
+ set_has_help();
+ help_ = help;
+ } else {
+ clear_has_help();
+ help_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+ }
+ // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.MetricFamily.help)
+}
+
+// optional .io.prometheus.client.MetricType type = 3;
+inline bool MetricFamily::has_type() const {
+ return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void MetricFamily::set_has_type() {
+ _has_bits_[0] |= 0x00000004u;
+}
+inline void MetricFamily::clear_has_type() {
+ _has_bits_[0] &= ~0x00000004u;
+}
+inline void MetricFamily::clear_type() {
+ type_ = 0;
+ clear_has_type();
+}
+inline ::io::prometheus::client::MetricType MetricFamily::type() const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.type)
+ return static_cast< ::io::prometheus::client::MetricType >(type_);
+}
+inline void MetricFamily::set_type(::io::prometheus::client::MetricType value) {
+ assert(::io::prometheus::client::MetricType_IsValid(value));
+ set_has_type();
+ type_ = value;
+ // @@protoc_insertion_point(field_set:io.prometheus.client.MetricFamily.type)
+}
+
+// repeated .io.prometheus.client.Metric metric = 4;
+inline int MetricFamily::metric_size() const {
+ return metric_.size();
+}
+inline void MetricFamily::clear_metric() {
+ metric_.Clear();
+}
+inline const ::io::prometheus::client::Metric& MetricFamily::metric(int index) const {
+ // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.metric)
+ return metric_.Get(index);
+}
+inline ::io::prometheus::client::Metric* MetricFamily::mutable_metric(int index) {
+ // @@protoc_insertion_point(field_mutable:io.prometheus.client.MetricFamily.metric)
+ return metric_.Mutable(index);
+}
+inline ::io::prometheus::client::Metric* MetricFamily::add_metric() {
+ // @@protoc_insertion_point(field_add:io.prometheus.client.MetricFamily.metric)
+ return metric_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >&
+MetricFamily::metric() const {
+ // @@protoc_insertion_point(field_list:io.prometheus.client.MetricFamily.metric)
+ return metric_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >*
+MetricFamily::mutable_metric() {
+ // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.MetricFamily.metric)
+ return &metric_;
+}
+
+
+// @@protoc_insertion_point(namespace_scope)
+
+} // namespace client
+} // namespace prometheus
+} // namespace io
+
+#ifndef SWIG
+namespace google {
+namespace protobuf {
+
+template <> struct is_proto_enum< ::io::prometheus::client::MetricType> : ::google::protobuf::internal::true_type {};
+template <>
+inline const EnumDescriptor* GetEnumDescriptor< ::io::prometheus::client::MetricType>() {
+ return ::io::prometheus::client::MetricType_descriptor();
+}
+
+} // namespace google
+} // namespace protobuf
+#endif // SWIG
+
+// @@protoc_insertion_point(global_scope)
+
+#endif // PROTOBUF_metrics_2eproto__INCLUDED
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 000000000..b065f8683
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
diff --git a/vendor/github.com/prometheus/client_model/metrics.proto b/vendor/github.com/prometheus/client_model/metrics.proto
new file mode 100644
index 000000000..0b84af920
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/metrics.proto
@@ -0,0 +1,81 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+
+package io.prometheus.client;
+option java_package = "io.prometheus.client";
+
+message LabelPair {
+ optional string name = 1;
+ optional string value = 2;
+}
+
+enum MetricType {
+ COUNTER = 0;
+ GAUGE = 1;
+ SUMMARY = 2;
+ UNTYPED = 3;
+ HISTOGRAM = 4;
+}
+
+message Gauge {
+ optional double value = 1;
+}
+
+message Counter {
+ optional double value = 1;
+}
+
+message Quantile {
+ optional double quantile = 1;
+ optional double value = 2;
+}
+
+message Summary {
+ optional uint64 sample_count = 1;
+ optional double sample_sum = 2;
+ repeated Quantile quantile = 3;
+}
+
+message Untyped {
+ optional double value = 1;
+}
+
+message Histogram {
+ optional uint64 sample_count = 1;
+ optional double sample_sum = 2;
+ repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+}
+
+message Bucket {
+ optional uint64 cumulative_count = 1; // Cumulative in increasing order.
+ optional double upper_bound = 2; // Inclusive.
+}
+
+message Metric {
+ repeated LabelPair label = 1;
+ optional Gauge gauge = 2;
+ optional Counter counter = 3;
+ optional Summary summary = 4;
+ optional Untyped untyped = 5;
+ optional Histogram histogram = 7;
+ optional int64 timestamp_ms = 6;
+}
+
+message MetricFamily {
+ optional string name = 1;
+ optional string help = 2;
+ optional MetricType type = 3;
+ repeated Metric metric = 4;
+}
diff --git a/vendor/github.com/prometheus/client_model/pom.xml b/vendor/github.com/prometheus/client_model/pom.xml
new file mode 100644
index 000000000..4d34c9015
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/pom.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>io.prometheus.client</groupId>
+ <artifactId>model</artifactId>
+ <version>0.0.3-SNAPSHOT</version>
+
+ <parent>
+ <groupId>org.sonatype.oss</groupId>
+ <artifactId>oss-parent</artifactId>
+ <version>7</version>
+ </parent>
+
+ <name>Prometheus Client Data Model</name>
+ <url>http://github.com/prometheus/client_model</url>
+ <description>
+ Prometheus Client Data Model: Generated Protocol Buffer Assets
+ </description>
+
+ <licenses>
+ <license>
+ <name>The Apache Software License, Version 2.0</name>
+ <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+ <distribution>repo</distribution>
+ </license>
+ </licenses>
+
+ <scm>
+ <connection>scm:git:git@github.com:prometheus/client_model.git</connection>
+ <developerConnection>scm:git:git@github.com:prometheus/client_model.git</developerConnection>
+ <url>git@github.com:prometheus/client_model.git</url>
+ </scm>
+
+ <developers>
+ <developer>
+ <id>mtp</id>
+ <name>Matt T. Proud</name>
+ <email>matt.proud@gmail.com</email>
+ </developer>
+ </developers>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>2.5.0</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.8</version>
+ <configuration>
+ <encoding>UTF-8</encoding>
+ <docencoding>UTF-8</docencoding>
+ <linksource>true</linksource>
+ </configuration>
+ <executions>
+ <execution>
+ <id>generate-javadoc-site-report</id>
+ <phase>site</phase>
+ <goals>
+ <goal>javadoc</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ </configuration>
+ <version>3.1</version>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <profiles>
+ <profile>
+ <id>release-sign-artifacts</id>
+ <activation>
+ <property>
+ <name>performRelease</name>
+ <value>true</value>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-gpg-plugin</artifactId>
+ <version>1.4</version>
+ <executions>
+ <execution>
+ <id>sign-artifacts</id>
+ <phase>verify</phase>
+ <goals>
+ <goal>sign</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+</project>
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/__init__.py b/vendor/github.com/prometheus/client_model/python/prometheus/__init__.py
new file mode 100644
index 000000000..617c0ced0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/python/prometheus/__init__.py
@@ -0,0 +1,12 @@
+ # Copyright 2013 Prometheus Team
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py b/vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py
new file mode 100644
index 000000000..617c0ced0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/python/prometheus/client/__init__.py
@@ -0,0 +1,12 @@
+ # Copyright 2013 Prometheus Team
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py b/vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py
new file mode 100644
index 000000000..d40327c32
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py
@@ -0,0 +1,14 @@
+ # Copyright 2013 Prometheus Team
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+
+ # http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+__all__ = ['metrics_pb2']
diff --git a/vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py b/vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py
new file mode 100644
index 000000000..8c239ac06
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py
@@ -0,0 +1,575 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: metrics.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='metrics.proto',
+ package='io.prometheus.client',
+ serialized_pb=_b('\n\rmetrics.proto\x12\x14io.prometheus.client\"(\n\tLabelPair\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x16\n\x05Gauge\x12\r\n\x05value\x18\x01 \x01(\x01\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x01\"+\n\x08Quantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01\"e\n\x07Summary\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12\x30\n\x08quantile\x18\x03 \x03(\x0b\x32\x1e.io.prometheus.client.Quantile\"\x18\n\x07Untyped\x12\r\n\x05value\x18\x01 \x01(\x01\"c\n\tHistogram\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12,\n\x06\x62ucket\x18\x03 \x03(\x0b\x32\x1c.io.prometheus.client.Bucket\"7\n\x06\x42ucket\x12\x18\n\x10\x63umulative_count\x18\x01 \x01(\x04\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x01\"\xbe\x02\n\x06Metric\x12.\n\x05label\x18\x01 \x03(\x0b\x32\x1f.io.prometheus.client.LabelPair\x12*\n\x05gauge\x18\x02 \x01(\x0b\x32\x1b.io.prometheus.client.Gauge\x12.\n\x07\x63ounter\x18\x03 \x01(\x0b\x32\x1d.io.prometheus.client.Counter\x12.\n\x07summary\x18\x04 \x01(\x0b\x32\x1d.io.prometheus.client.Summary\x12.\n\x07untyped\x18\x05 \x01(\x0b\x32\x1d.io.prometheus.client.Untyped\x12\x32\n\thistogram\x18\x07 \x01(\x0b\x32\x1f.io.prometheus.client.Histogram\x12\x14\n\x0ctimestamp_ms\x18\x06 \x01(\x03\"\x88\x01\n\x0cMetricFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04help\x18\x02 \x01(\t\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .io.prometheus.client.MetricType\x12,\n\x06metric\x18\x04 \x03(\x0b\x32\x1c.io.prometheus.client.Metric*M\n\nMetricType\x12\x0b\n\x07\x43OUNTER\x10\x00\x12\t\n\x05GAUGE\x10\x01\x12\x0b\n\x07SUMMARY\x10\x02\x12\x0b\n\x07UNTYPED\x10\x03\x12\r\n\tHISTOGRAM\x10\x04\x42\x16\n\x14io.prometheus.client')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+_METRICTYPE = _descriptor.EnumDescriptor(
+ name='MetricType',
+ full_name='io.prometheus.client.MetricType',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='COUNTER', index=0, number=0,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='GAUGE', index=1, number=1,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='SUMMARY', index=2, number=2,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='UNTYPED', index=3, number=3,
+ options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='HISTOGRAM', index=4, number=4,
+ options=None,
+ type=None),
+ ],
+ containing_type=None,
+ options=None,
+ serialized_start=923,
+ serialized_end=1000,
+)
+_sym_db.RegisterEnumDescriptor(_METRICTYPE)
+
+MetricType = enum_type_wrapper.EnumTypeWrapper(_METRICTYPE)
+COUNTER = 0
+GAUGE = 1
+SUMMARY = 2
+UNTYPED = 3
+HISTOGRAM = 4
+
+
+
+_LABELPAIR = _descriptor.Descriptor(
+ name='LabelPair',
+ full_name='io.prometheus.client.LabelPair',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='io.prometheus.client.LabelPair.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='io.prometheus.client.LabelPair.value', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=39,
+ serialized_end=79,
+)
+
+
+_GAUGE = _descriptor.Descriptor(
+ name='Gauge',
+ full_name='io.prometheus.client.Gauge',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='value', full_name='io.prometheus.client.Gauge.value', index=0,
+ number=1, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=81,
+ serialized_end=103,
+)
+
+
+_COUNTER = _descriptor.Descriptor(
+ name='Counter',
+ full_name='io.prometheus.client.Counter',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='value', full_name='io.prometheus.client.Counter.value', index=0,
+ number=1, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=105,
+ serialized_end=129,
+)
+
+
+_QUANTILE = _descriptor.Descriptor(
+ name='Quantile',
+ full_name='io.prometheus.client.Quantile',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='quantile', full_name='io.prometheus.client.Quantile.quantile', index=0,
+ number=1, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='io.prometheus.client.Quantile.value', index=1,
+ number=2, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=131,
+ serialized_end=174,
+)
+
+
+_SUMMARY = _descriptor.Descriptor(
+ name='Summary',
+ full_name='io.prometheus.client.Summary',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='sample_count', full_name='io.prometheus.client.Summary.sample_count', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sample_sum', full_name='io.prometheus.client.Summary.sample_sum', index=1,
+ number=2, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='quantile', full_name='io.prometheus.client.Summary.quantile', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=176,
+ serialized_end=277,
+)
+
+
+_UNTYPED = _descriptor.Descriptor(
+ name='Untyped',
+ full_name='io.prometheus.client.Untyped',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='value', full_name='io.prometheus.client.Untyped.value', index=0,
+ number=1, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=279,
+ serialized_end=303,
+)
+
+
+_HISTOGRAM = _descriptor.Descriptor(
+ name='Histogram',
+ full_name='io.prometheus.client.Histogram',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='sample_count', full_name='io.prometheus.client.Histogram.sample_count', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='sample_sum', full_name='io.prometheus.client.Histogram.sample_sum', index=1,
+ number=2, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='bucket', full_name='io.prometheus.client.Histogram.bucket', index=2,
+ number=3, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=305,
+ serialized_end=404,
+)
+
+
+_BUCKET = _descriptor.Descriptor(
+ name='Bucket',
+ full_name='io.prometheus.client.Bucket',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='cumulative_count', full_name='io.prometheus.client.Bucket.cumulative_count', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='upper_bound', full_name='io.prometheus.client.Bucket.upper_bound', index=1,
+ number=2, type=1, cpp_type=5, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=406,
+ serialized_end=461,
+)
+
+
+_METRIC = _descriptor.Descriptor(
+ name='Metric',
+ full_name='io.prometheus.client.Metric',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='label', full_name='io.prometheus.client.Metric.label', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='gauge', full_name='io.prometheus.client.Metric.gauge', index=1,
+ number=2, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='counter', full_name='io.prometheus.client.Metric.counter', index=2,
+ number=3, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='summary', full_name='io.prometheus.client.Metric.summary', index=3,
+ number=4, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='untyped', full_name='io.prometheus.client.Metric.untyped', index=4,
+ number=5, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='histogram', full_name='io.prometheus.client.Metric.histogram', index=5,
+ number=7, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='timestamp_ms', full_name='io.prometheus.client.Metric.timestamp_ms', index=6,
+ number=6, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=464,
+ serialized_end=782,
+)
+
+
+_METRICFAMILY = _descriptor.Descriptor(
+ name='MetricFamily',
+ full_name='io.prometheus.client.MetricFamily',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='io.prometheus.client.MetricFamily.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='help', full_name='io.prometheus.client.MetricFamily.help', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=_b("").decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='type', full_name='io.prometheus.client.MetricFamily.type', index=2,
+ number=3, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='metric', full_name='io.prometheus.client.MetricFamily.metric', index=3,
+ number=4, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ options=None,
+ is_extendable=False,
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=785,
+ serialized_end=921,
+)
+
+_SUMMARY.fields_by_name['quantile'].message_type = _QUANTILE
+_HISTOGRAM.fields_by_name['bucket'].message_type = _BUCKET
+_METRIC.fields_by_name['label'].message_type = _LABELPAIR
+_METRIC.fields_by_name['gauge'].message_type = _GAUGE
+_METRIC.fields_by_name['counter'].message_type = _COUNTER
+_METRIC.fields_by_name['summary'].message_type = _SUMMARY
+_METRIC.fields_by_name['untyped'].message_type = _UNTYPED
+_METRIC.fields_by_name['histogram'].message_type = _HISTOGRAM
+_METRICFAMILY.fields_by_name['type'].enum_type = _METRICTYPE
+_METRICFAMILY.fields_by_name['metric'].message_type = _METRIC
+DESCRIPTOR.message_types_by_name['LabelPair'] = _LABELPAIR
+DESCRIPTOR.message_types_by_name['Gauge'] = _GAUGE
+DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
+DESCRIPTOR.message_types_by_name['Quantile'] = _QUANTILE
+DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
+DESCRIPTOR.message_types_by_name['Untyped'] = _UNTYPED
+DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM
+DESCRIPTOR.message_types_by_name['Bucket'] = _BUCKET
+DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
+DESCRIPTOR.message_types_by_name['MetricFamily'] = _METRICFAMILY
+DESCRIPTOR.enum_types_by_name['MetricType'] = _METRICTYPE
+
+LabelPair = _reflection.GeneratedProtocolMessageType('LabelPair', (_message.Message,), dict(
+ DESCRIPTOR = _LABELPAIR,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
+ ))
+_sym_db.RegisterMessage(LabelPair)
+
+Gauge = _reflection.GeneratedProtocolMessageType('Gauge', (_message.Message,), dict(
+ DESCRIPTOR = _GAUGE,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
+ ))
+_sym_db.RegisterMessage(Gauge)
+
+Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
+ DESCRIPTOR = _COUNTER,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
+ ))
+_sym_db.RegisterMessage(Counter)
+
+Quantile = _reflection.GeneratedProtocolMessageType('Quantile', (_message.Message,), dict(
+ DESCRIPTOR = _QUANTILE,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
+ ))
+_sym_db.RegisterMessage(Quantile)
+
+Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), dict(
+ DESCRIPTOR = _SUMMARY,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
+ ))
+_sym_db.RegisterMessage(Summary)
+
+Untyped = _reflection.GeneratedProtocolMessageType('Untyped', (_message.Message,), dict(
+ DESCRIPTOR = _UNTYPED,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
+ ))
+_sym_db.RegisterMessage(Untyped)
+
+Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict(
+ DESCRIPTOR = _HISTOGRAM,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
+ ))
+_sym_db.RegisterMessage(Histogram)
+
+Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict(
+ DESCRIPTOR = _BUCKET,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
+ ))
+_sym_db.RegisterMessage(Bucket)
+
+Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict(
+ DESCRIPTOR = _METRIC,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
+ ))
+_sym_db.RegisterMessage(Metric)
+
+MetricFamily = _reflection.GeneratedProtocolMessageType('MetricFamily', (_message.Message,), dict(
+ DESCRIPTOR = _METRICFAMILY,
+ __module__ = 'metrics_pb2'
+ # @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
+ ))
+_sym_db.RegisterMessage(MetricFamily)
+
+
+DESCRIPTOR.has_options = True
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024io.prometheus.client'))
+# @@protoc_insertion_point(module_scope)
diff --git a/vendor/github.com/prometheus/client_model/ruby/.gitignore b/vendor/github.com/prometheus/client_model/ruby/.gitignore
new file mode 100644
index 000000000..8442a4709
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/.gitignore
@@ -0,0 +1,5 @@
+*.gem
+.bundle
+Gemfile.lock
+pkg
+vendor/bundle
diff --git a/vendor/github.com/prometheus/client_model/ruby/Gemfile b/vendor/github.com/prometheus/client_model/ruby/Gemfile
new file mode 100644
index 000000000..1ff638cdc
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/Gemfile
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+
+# Specify your gem's dependencies in prometheus-client-model.gemspec
+gemspec
diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE
new file mode 100644
index 000000000..11069edd7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/ruby/Makefile b/vendor/github.com/prometheus/client_model/ruby/Makefile
new file mode 100644
index 000000000..09d544bf0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/Makefile
@@ -0,0 +1,17 @@
+VENDOR_BUNDLE = vendor/bundle
+
+build: $(VENDOR_BUNDLE)/.bundled
+ BEEFCAKE_NAMESPACE=Prometheus::Client protoc --beefcake_out lib/prometheus/client/model -I .. ../metrics.proto
+
+$(VENDOR_BUNDLE):
+ mkdir -p $@
+
+$(VENDOR_BUNDLE)/.bundled: $(VENDOR_BUNDLE) Gemfile
+ bundle install --quiet --path $<
+ @touch $@
+
+clean:
+ -rm -f lib/prometheus/client/model/metrics.pb.rb
+ -rm -rf $(VENDOR_BUNDLE)
+
+.PHONY: build clean
diff --git a/vendor/github.com/prometheus/client_model/ruby/README.md b/vendor/github.com/prometheus/client_model/ruby/README.md
new file mode 100644
index 000000000..c45fcc7a9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/README.md
@@ -0,0 +1,31 @@
+# Prometheus Ruby client model
+
+Data model artifacts for the [Prometheus Ruby client][1].
+
+## Installation
+
+ gem install prometheus-client-model
+
+## Usage
+
+Build the artifacts from the protobuf specification:
+
+ make build
+
+While this Gem's main purpose is to define the Prometheus data types for the
+[client][1], it's possible to use it without the client to decode a stream of
+delimited protobuf messages:
+
+```ruby
+require 'open-uri'
+require 'prometheus/client/model'
+
+CONTENT_TYPE = 'application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited'
+
+stream = open('http://localhost:9090/metrics', 'Accept' => CONTENT_TYPE).read
+while family = Prometheus::Client::MetricFamily.read_delimited(stream)
+ puts family
+end
+```
+
+[1]: https://github.com/prometheus/client_ruby
diff --git a/vendor/github.com/prometheus/client_model/ruby/Rakefile b/vendor/github.com/prometheus/client_model/ruby/Rakefile
new file mode 100644
index 000000000..29955274e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/Rakefile
@@ -0,0 +1 @@
+require "bundler/gem_tasks"
diff --git a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb
new file mode 100644
index 000000000..b5303bf1e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb
@@ -0,0 +1,2 @@
+require 'prometheus/client/model/metrics.pb'
+require 'prometheus/client/model/version'
diff --git a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb
new file mode 100644
index 000000000..a72114b8f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb
@@ -0,0 +1,111 @@
+## Generated from metrics.proto for io.prometheus.client
+require "beefcake"
+
+module Prometheus
+ module Client
+
+ module MetricType
+ COUNTER = 0
+ GAUGE = 1
+ SUMMARY = 2
+ UNTYPED = 3
+ HISTOGRAM = 4
+ end
+
+ class LabelPair
+ include Beefcake::Message
+ end
+
+ class Gauge
+ include Beefcake::Message
+ end
+
+ class Counter
+ include Beefcake::Message
+ end
+
+ class Quantile
+ include Beefcake::Message
+ end
+
+ class Summary
+ include Beefcake::Message
+ end
+
+ class Untyped
+ include Beefcake::Message
+ end
+
+ class Histogram
+ include Beefcake::Message
+ end
+
+ class Bucket
+ include Beefcake::Message
+ end
+
+ class Metric
+ include Beefcake::Message
+ end
+
+ class MetricFamily
+ include Beefcake::Message
+ end
+
+ class LabelPair
+ optional :name, :string, 1
+ optional :value, :string, 2
+ end
+
+ class Gauge
+ optional :value, :double, 1
+ end
+
+ class Counter
+ optional :value, :double, 1
+ end
+
+ class Quantile
+ optional :quantile, :double, 1
+ optional :value, :double, 2
+ end
+
+ class Summary
+ optional :sample_count, :uint64, 1
+ optional :sample_sum, :double, 2
+ repeated :quantile, Quantile, 3
+ end
+
+ class Untyped
+ optional :value, :double, 1
+ end
+
+ class Histogram
+ optional :sample_count, :uint64, 1
+ optional :sample_sum, :double, 2
+ repeated :bucket, Bucket, 3
+ end
+
+ class Bucket
+ optional :cumulative_count, :uint64, 1
+ optional :upper_bound, :double, 2
+ end
+
+ class Metric
+ repeated :label, LabelPair, 1
+ optional :gauge, Gauge, 2
+ optional :counter, Counter, 3
+ optional :summary, Summary, 4
+ optional :untyped, Untyped, 5
+ optional :histogram, Histogram, 7
+ optional :timestamp_ms, :int64, 6
+ end
+
+ class MetricFamily
+ optional :name, :string, 1
+ optional :help, :string, 2
+ optional :type, MetricType, 3
+ repeated :metric, Metric, 4
+ end
+ end
+end
diff --git a/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb
new file mode 100644
index 000000000..00b5e863e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb
@@ -0,0 +1,7 @@
+module Prometheus
+ module Client
+ module Model
+ VERSION = '0.1.0'
+ end
+ end
+end
diff --git a/vendor/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec b/vendor/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec
new file mode 100644
index 000000000..438ba127e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec
@@ -0,0 +1,22 @@
+# coding: utf-8
+lib = File.expand_path('../lib', __FILE__)
+$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+require 'prometheus/client/model/version'
+
+Gem::Specification.new do |spec|
+ spec.name = 'prometheus-client-model'
+ spec.version = Prometheus::Client::Model::VERSION
+ spec.authors = ['Tobias Schmidt']
+ spec.email = ['tobidt@gmail.com']
+ spec.summary = 'Data model artifacts for the Prometheus Ruby client'
+ spec.homepage = 'https://github.com/prometheus/client_model/tree/master/ruby'
+ spec.license = 'Apache 2.0'
+
+ spec.files = %w[README.md LICENSE] + Dir.glob('{lib/**/*}')
+ spec.require_paths = ['lib']
+
+ spec.add_dependency 'beefcake', '>= 0.4.0'
+
+ spec.add_development_dependency 'bundler', '~> 1.3'
+ spec.add_development_dependency 'rake'
+end
diff --git a/vendor/github.com/prometheus/client_model/setup.py b/vendor/github.com/prometheus/client_model/setup.py
new file mode 100644
index 000000000..67b9f20e3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/setup.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+
+from setuptools import setup
+
+setup(
+ name = 'prometheus_client_model',
+ version = '0.0.1',
+ author = 'Matt T. Proud',
+ author_email = 'matt.proud@gmail.com',
+ description = 'Data model artifacts for the Prometheus client.',
+ license = 'Apache License 2.0',
+ url = 'http://github.com/prometheus/client_model',
+ packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'],
+ package_dir = {'': 'python'},
+ requires = ['protobuf(==2.4.1)'],
+ platforms = 'Platform Independent',
+ classifiers = ['Development Status :: 3 - Alpha',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Topic :: Software Development :: Testing',
+ 'Topic :: System :: Monitoring'])
diff --git a/vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java b/vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java
new file mode 100644
index 000000000..fb6218e1e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java
@@ -0,0 +1,7683 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: metrics.proto
+
+package io.prometheus.client;
+
+public final class Metrics {
+ private Metrics() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ /**
+ * Protobuf enum {@code io.prometheus.client.MetricType}
+ */
+ public enum MetricType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>COUNTER = 0;</code>
+ */
+ COUNTER(0, 0),
+ /**
+ * <code>GAUGE = 1;</code>
+ */
+ GAUGE(1, 1),
+ /**
+ * <code>SUMMARY = 2;</code>
+ */
+ SUMMARY(2, 2),
+ /**
+ * <code>UNTYPED = 3;</code>
+ */
+ UNTYPED(3, 3),
+ /**
+ * <code>HISTOGRAM = 4;</code>
+ */
+ HISTOGRAM(4, 4),
+ ;
+
+ /**
+ * <code>COUNTER = 0;</code>
+ */
+ public static final int COUNTER_VALUE = 0;
+ /**
+ * <code>GAUGE = 1;</code>
+ */
+ public static final int GAUGE_VALUE = 1;
+ /**
+ * <code>SUMMARY = 2;</code>
+ */
+ public static final int SUMMARY_VALUE = 2;
+ /**
+ * <code>UNTYPED = 3;</code>
+ */
+ public static final int UNTYPED_VALUE = 3;
+ /**
+ * <code>HISTOGRAM = 4;</code>
+ */
+ public static final int HISTOGRAM_VALUE = 4;
+
+
+ public final int getNumber() { return value; }
+
+ public static MetricType valueOf(int value) {
+ switch (value) {
+ case 0: return COUNTER;
+ case 1: return GAUGE;
+ case 2: return SUMMARY;
+ case 3: return UNTYPED;
+ case 4: return HISTOGRAM;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<MetricType>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<MetricType>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<MetricType>() {
+ public MetricType findValueByNumber(int number) {
+ return MetricType.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final MetricType[] VALUES = values();
+
+ public static MetricType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private MetricType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:io.prometheus.client.MetricType)
+ }
+
+ public interface LabelPairOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.LabelPair)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ java.lang.String getValue();
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getValueBytes();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.LabelPair}
+ */
+ public static final class LabelPair extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.LabelPair)
+ LabelPairOrBuilder {
+ // Use LabelPair.newBuilder() to construct.
+ private LabelPair(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private LabelPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final LabelPair defaultInstance;
+ public static LabelPair getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public LabelPair getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private LabelPair(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ com.google.protobuf.ByteString bs = input.readBytes();
+ bitField0_ |= 0x00000001;
+ name_ = bs;
+ break;
+ }
+ case 18: {
+ com.google.protobuf.ByteString bs = input.readBytes();
+ bitField0_ |= 0x00000002;
+ value_ = bs;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.LabelPair.class, io.prometheus.client.Metrics.LabelPair.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<LabelPair> PARSER =
+ new com.google.protobuf.AbstractParser<LabelPair>() {
+ public LabelPair parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new LabelPair(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<LabelPair> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private java.lang.Object value_;
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ value_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ name_ = "";
+ value_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getValueBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getValueBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.LabelPair parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.LabelPair prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.LabelPair}
+ *
+ * Generated builder for LabelPair: two optional string fields,
+ * name (field 1) and value (field 2), with presence tracked in bitField0_.
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.LabelPair)
+ io.prometheus.client.Metrics.LabelPairOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.LabelPair.class, io.prometheus.client.Metrics.LabelPair.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.LabelPair.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ // LabelPair has no message-typed sub-fields, so there are no field
+ // builders to force-initialize; the body is intentionally empty.
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ // Resets both fields to their defaults and clears their presence bits.
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.LabelPair getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.LabelPair.getDefaultInstance();
+ }
+
+ // build() enforces isInitialized(); since all fields are optional this
+ // never actually throws for LabelPair.
+ public io.prometheus.client.Metrics.LabelPair build() {
+ io.prometheus.client.Metrics.LabelPair result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Copies the builder state (field values and presence bits) into a new
+ // immutable LabelPair without the initialization check.
+ public io.prometheus.client.Metrics.LabelPair buildPartial() {
+ io.prometheus.client.Metrics.LabelPair result = new io.prometheus.client.Metrics.LabelPair(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.LabelPair) {
+ return mergeFrom((io.prometheus.client.Metrics.LabelPair)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ // Field-wise merge: only fields present in `other` overwrite this builder.
+ public Builder mergeFrom(io.prometheus.client.Metrics.LabelPair other) {
+ if (other == io.prometheus.client.Metrics.LabelPair.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (other.hasValue()) {
+ bitField0_ |= 0x00000002;
+ value_ = other.value_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ // No required fields, so any LabelPair is initialized.
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ // Stream merge: on a parse error, keep whatever was successfully read
+ // (the unfinished message) before rethrowing.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.LabelPair parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.LabelPair) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ // Presence bits: 0x1 = name set, 0x2 = value set.
+ private int bitField0_;
+
+ // Stored as Object: either a String or a lazily-decoded ByteString.
+ private java.lang.Object name_ = "";
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ *
+ * Lazily decodes a ByteString to String; the decoded form is cached
+ * only when it is valid UTF-8.
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ *
+ * Returns the UTF-8 bytes, caching the ByteString form when converting
+ * from a String.
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // Stored as Object: either a String or a lazily-decoded ByteString.
+ private java.lang.Object value_ = "";
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ *
+ * Lazily decodes a ByteString to String; the decoded form is cached
+ * only when it is valid UTF-8.
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ value_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ *
+ * Returns the UTF-8 bytes, caching the ByteString form when converting
+ * from a String.
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public Builder setValue(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public Builder setValueBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.LabelPair)
+ }
+
+ // Eagerly constructs the shared default (empty) instance at class-load time.
+ static {
+ defaultInstance = new LabelPair(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
+ }
+
+ // Read-only accessor interface shared by the Gauge message and its Builder.
+ public interface GaugeOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Gauge)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ double getValue();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Gauge}
+ *
+ * Generated message with a single optional double field, value (field 1).
+ */
+ public static final class Gauge extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Gauge)
+ GaugeOrBuilder {
+ // Use Gauge.newBuilder() to construct.
+ private Gauge(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Gauge(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Gauge defaultInstance;
+ public static Gauge getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Gauge getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ // Wire-format parsing constructor: reads tags until end of input (tag 0),
+ // stashing unrecognized fields in unknownFields.
+ private Gauge(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ // NOTE: `default` preceding `case 9` is legal Java and is how
+ // protoc emits these switches; case order has no effect.
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ // tag 9 = field 1, wire type 1 (fixed 64-bit): the double value.
+ case 9: {
+ bitField0_ |= 0x00000001;
+ value_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Gauge.class, io.prometheus.client.Metrics.Gauge.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Gauge> PARSER =
+ new com.google.protobuf.AbstractParser<Gauge>() {
+ public Gauge parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Gauge(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Gauge> getParserForType() {
+ return PARSER;
+ }
+
+ // Presence bit 0x1 tracks whether `value` was set.
+ private int bitField0_;
+ public static final int VALUE_FIELD_NUMBER = 1;
+ private double value_;
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ value_ = 0D;
+ }
+ // Memoized tri-state: -1 unknown, 0 false, 1 true. Gauge has no required
+ // fields, so this always resolves to true.
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeDouble(1, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ // Serialized size is computed once and cached (-1 = not yet computed).
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(1, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ // Static parse entry points; all delegate to PARSER.
+ public static io.prometheus.client.Metrics.Gauge parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Gauge parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ // Builder factories (standard protobuf 2.x generated surface).
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Gauge prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Gauge}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Gauge)
+ io.prometheus.client.Metrics.GaugeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Gauge.class, io.prometheus.client.Metrics.Gauge.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Gauge.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ // No message-typed sub-fields, so there is nothing to force-initialize.
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ value_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Gauge getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Gauge.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Gauge build() {
+ io.prometheus.client.Metrics.Gauge result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Copies builder state (value and its presence bit) into a new message.
+ public io.prometheus.client.Metrics.Gauge buildPartial() {
+ io.prometheus.client.Metrics.Gauge result = new io.prometheus.client.Metrics.Gauge(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Gauge) {
+ return mergeFrom((io.prometheus.client.Metrics.Gauge)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Gauge other) {
+ if (other == io.prometheus.client.Metrics.Gauge.getDefaultInstance()) return this;
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ // Stream merge: on a parse error, keep whatever was successfully read
+ // (the unfinished message) before rethrowing.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Gauge parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Gauge) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private double value_ ;
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public Builder setValue(double value) {
+ bitField0_ |= 0x00000001;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Gauge)
+ }
+
+ // Eagerly constructs the shared default (empty) instance at class-load time.
+ static {
+ defaultInstance = new Gauge(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
+ }
+
+ // Read-only accessor interface shared by the Counter message and its Builder.
+ public interface CounterOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Counter)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ double getValue();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Counter}
+ *
+ * Generated message with a single optional double field, value (field 1).
+ * Structurally identical to Gauge; only the descriptor differs.
+ */
+ public static final class Counter extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Counter)
+ CounterOrBuilder {
+ // Use Counter.newBuilder() to construct.
+ private Counter(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Counter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Counter defaultInstance;
+ public static Counter getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Counter getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ // Wire-format parsing constructor: reads tags until end of input (tag 0),
+ // stashing unrecognized fields in unknownFields.
+ private Counter(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ // NOTE: `default` preceding `case 9` is legal Java and is how
+ // protoc emits these switches; case order has no effect.
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ // tag 9 = field 1, wire type 1 (fixed 64-bit): the double value.
+ case 9: {
+ bitField0_ |= 0x00000001;
+ value_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Counter.class, io.prometheus.client.Metrics.Counter.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Counter> PARSER =
+ new com.google.protobuf.AbstractParser<Counter>() {
+ public Counter parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Counter(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Counter> getParserForType() {
+ return PARSER;
+ }
+
+ // Presence bit 0x1 tracks whether `value` was set.
+ private int bitField0_;
+ public static final int VALUE_FIELD_NUMBER = 1;
+ private double value_;
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ value_ = 0D;
+ }
+ // Memoized tri-state: -1 unknown, 0 false, 1 true. Counter has no required
+ // fields, so this always resolves to true.
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeDouble(1, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ // Serialized size is computed once and cached (-1 = not yet computed).
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(1, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ // Static parse entry points; all delegate to PARSER.
+ public static io.prometheus.client.Metrics.Counter parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Counter parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Counter parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Counter parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ // Builder factories (standard protobuf 2.x generated surface).
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Counter prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Counter}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Counter)
+ io.prometheus.client.Metrics.CounterOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Counter.class, io.prometheus.client.Metrics.Counter.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Counter.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ // No message-typed sub-fields, so there is nothing to force-initialize.
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ value_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Counter getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Counter.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Counter build() {
+ io.prometheus.client.Metrics.Counter result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Copies builder state (value and its presence bit) into a new message.
+ public io.prometheus.client.Metrics.Counter buildPartial() {
+ io.prometheus.client.Metrics.Counter result = new io.prometheus.client.Metrics.Counter(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Counter) {
+ return mergeFrom((io.prometheus.client.Metrics.Counter)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Counter other) {
+ if (other == io.prometheus.client.Metrics.Counter.getDefaultInstance()) return this;
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ // Stream merge: on a parse error, keep whatever was successfully read
+ // (the unfinished message) before rethrowing.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Counter parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Counter) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private double value_ ;
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public Builder setValue(double value) {
+ bitField0_ |= 0x00000001;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Counter)
+ }
+
+ // Eagerly constructs the shared default (empty) instance at class-load time.
+ static {
+ defaultInstance = new Counter(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
+ }
+
+ // Read-only accessor interface shared by the Quantile message and its Builder.
+ // A Quantile pairs a rank (e.g. 0.99) with the observed value at that rank.
+ public interface QuantileOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Quantile)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ boolean hasQuantile();
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ double getQuantile();
+
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ double getValue();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Quantile}
+ *
+ * <p>protoc-generated message class (protobuf 2.x GeneratedMessage runtime;
+ * see the protoc_insertion_point markers) — do not edit by hand. Holds one
+ * quantile/value pair. Presence of the two optional double fields is tracked
+ * in {@code bitField0_}: bit 0 = quantile, bit 1 = value.
+ */
+ public static final class Quantile extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Quantile)
+ QuantileOrBuilder {
+ // Use Quantile.newBuilder() to construct.
+ private Quantile(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ // Constructor used only by the static initializer below to create the
+ // shared defaultInstance (no unknown fields to carry over).
+ private Quantile(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Quantile defaultInstance;
+ public static Quantile getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Quantile getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ // Wire-format parsing constructor, invoked via PARSER.parsePartialFrom:
+ // reads tag/value pairs until end of input (tag 0) or an unparseable
+ // unknown field, preserving unrecognized data in unknownFields.
+ private Quantile(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ // Note: the default branch precedes the field cases; case order in a
+ // Java switch is irrelevant.
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ // tag 9 = (field 1 << 3) | wire type 1 (fixed 64-bit): quantile
+ case 9: {
+ bitField0_ |= 0x00000001;
+ quantile_ = input.readDouble();
+ break;
+ }
+ // tag 17 = (field 2 << 3) | wire type 1 (fixed 64-bit): value
+ case 17: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Quantile.class, io.prometheus.client.Metrics.Quantile.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Quantile> PARSER =
+ new com.google.protobuf.AbstractParser<Quantile>() {
+ public Quantile parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Quantile(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Quantile> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int QUANTILE_FIELD_NUMBER = 1;
+ private double quantile_;
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ public boolean hasQuantile() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ public double getQuantile() {
+ return quantile_;
+ }
+
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private double value_;
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ quantile_ = 0D;
+ value_ = 0D;
+ }
+ // Lazily-cached isInitialized() result: -1 = not computed, 0 = false, 1 = true.
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeDouble(1, quantile_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeDouble(2, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ // Lazily-cached serialized byte size; -1 = not yet computed.
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(1, quantile_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(2, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.Quantile parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Quantile parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Quantile prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Quantile}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Quantile)
+ io.prometheus.client.Metrics.QuantileOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Quantile.class, io.prometheus.client.Metrics.Quantile.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Quantile.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ quantile_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Quantile getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Quantile.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Quantile build() {
+ io.prometheus.client.Metrics.Quantile result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Copies builder state into a new message without the isInitialized check.
+ public io.prometheus.client.Metrics.Quantile buildPartial() {
+ io.prometheus.client.Metrics.Quantile result = new io.prometheus.client.Metrics.Quantile(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.quantile_ = quantile_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Quantile) {
+ return mergeFrom((io.prometheus.client.Metrics.Quantile)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Quantile other) {
+ if (other == io.prometheus.client.Metrics.Quantile.getDefaultInstance()) return this;
+ if (other.hasQuantile()) {
+ setQuantile(other.getQuantile());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ // Parses from a stream; when parsing fails partway, the unfinished
+ // message is still merged in (finally block) before the exception
+ // propagates, so successfully-read fields are not lost.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Quantile parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Quantile) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private double quantile_ ;
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ public boolean hasQuantile() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ public double getQuantile() {
+ return quantile_;
+ }
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ public Builder setQuantile(double value) {
+ bitField0_ |= 0x00000001;
+ quantile_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double quantile = 1;</code>
+ */
+ public Builder clearQuantile() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ quantile_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ private double value_ ;
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ public Builder setValue(double value) {
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double value = 2;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Quantile)
+ }
+
+ // Eagerly builds the shared immutable default instance.
+ static {
+ defaultInstance = new Quantile(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
+ }
+
+ /**
+ * Read-only accessors for {@code io.prometheus.client.Summary}: presence
+ * checks and getters for sample_count / sample_sum, plus list-style access
+ * to the repeated quantile field. Implemented by both the immutable Summary
+ * message and Summary.Builder.
+ * protoc-generated (see the protoc_insertion_point markers) — do not edit by hand.
+ */
+ public interface SummaryOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Summary)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ boolean hasSampleCount();
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ long getSampleCount();
+
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ boolean hasSampleSum();
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ double getSampleSum();
+
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ java.util.List<io.prometheus.client.Metrics.Quantile>
+ getQuantileList();
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ io.prometheus.client.Metrics.Quantile getQuantile(int index);
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ int getQuantileCount();
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ java.util.List<? extends io.prometheus.client.Metrics.QuantileOrBuilder>
+ getQuantileOrBuilderList();
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Summary}
+ */
+ public static final class Summary extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Summary)
+ SummaryOrBuilder {
+ // Use Summary.newBuilder() to construct.
+ private Summary(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Summary(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Summary defaultInstance;
+ public static Summary getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Summary getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Summary(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ sampleCount_ = input.readUInt64();
+ break;
+ }
+ case 17: {
+ bitField0_ |= 0x00000002;
+ sampleSum_ = input.readDouble();
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ quantile_ = new java.util.ArrayList<io.prometheus.client.Metrics.Quantile>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ quantile_.add(input.readMessage(io.prometheus.client.Metrics.Quantile.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ quantile_ = java.util.Collections.unmodifiableList(quantile_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Summary.class, io.prometheus.client.Metrics.Summary.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Summary> PARSER =
+ new com.google.protobuf.AbstractParser<Summary>() {
+ public Summary parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Summary(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Summary> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int SAMPLE_COUNT_FIELD_NUMBER = 1;
+ private long sampleCount_;
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public boolean hasSampleCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public long getSampleCount() {
+ return sampleCount_;
+ }
+
+ public static final int SAMPLE_SUM_FIELD_NUMBER = 2;
+ private double sampleSum_;
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public boolean hasSampleSum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public double getSampleSum() {
+ return sampleSum_;
+ }
+
+ public static final int QUANTILE_FIELD_NUMBER = 3;
+ private java.util.List<io.prometheus.client.Metrics.Quantile> quantile_;
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Quantile> getQuantileList() {
+ return quantile_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.QuantileOrBuilder>
+ getQuantileOrBuilderList() {
+ return quantile_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public int getQuantileCount() {
+ return quantile_.size();
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Quantile getQuantile(int index) {
+ return quantile_.get(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder(
+ int index) {
+ return quantile_.get(index);
+ }
+
+ private void initFields() {
+ sampleCount_ = 0L;
+ sampleSum_ = 0D;
+ quantile_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, sampleCount_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeDouble(2, sampleSum_);
+ }
+ for (int i = 0; i < quantile_.size(); i++) {
+ output.writeMessage(3, quantile_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, sampleCount_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(2, sampleSum_);
+ }
+ for (int i = 0; i < quantile_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, quantile_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.Summary parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Summary parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Summary parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Summary parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Summary prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Summary}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Summary)
+ io.prometheus.client.Metrics.SummaryOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Summary.class, io.prometheus.client.Metrics.Summary.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Summary.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getQuantileFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ sampleCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ sampleSum_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (quantileBuilder_ == null) {
+ quantile_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ quantileBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Summary getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Summary.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Summary build() {
+ io.prometheus.client.Metrics.Summary result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public io.prometheus.client.Metrics.Summary buildPartial() {
+ io.prometheus.client.Metrics.Summary result = new io.prometheus.client.Metrics.Summary(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.sampleCount_ = sampleCount_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.sampleSum_ = sampleSum_;
+ if (quantileBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ quantile_ = java.util.Collections.unmodifiableList(quantile_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.quantile_ = quantile_;
+ } else {
+ result.quantile_ = quantileBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Summary) {
+ return mergeFrom((io.prometheus.client.Metrics.Summary)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Summary other) {
+ if (other == io.prometheus.client.Metrics.Summary.getDefaultInstance()) return this;
+ if (other.hasSampleCount()) {
+ setSampleCount(other.getSampleCount());
+ }
+ if (other.hasSampleSum()) {
+ setSampleSum(other.getSampleSum());
+ }
+ if (quantileBuilder_ == null) {
+ if (!other.quantile_.isEmpty()) {
+ if (quantile_.isEmpty()) {
+ quantile_ = other.quantile_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureQuantileIsMutable();
+ quantile_.addAll(other.quantile_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.quantile_.isEmpty()) {
+ if (quantileBuilder_.isEmpty()) {
+ quantileBuilder_.dispose();
+ quantileBuilder_ = null;
+ quantile_ = other.quantile_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ quantileBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getQuantileFieldBuilder() : null;
+ } else {
+ quantileBuilder_.addAllMessages(other.quantile_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Summary parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Summary) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private long sampleCount_ ;
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public boolean hasSampleCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public long getSampleCount() {
+ return sampleCount_;
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public Builder setSampleCount(long value) {
+ bitField0_ |= 0x00000001;
+ sampleCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public Builder clearSampleCount() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ sampleCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ private double sampleSum_ ;
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public boolean hasSampleSum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public double getSampleSum() {
+ return sampleSum_;
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public Builder setSampleSum(double value) {
+ bitField0_ |= 0x00000002;
+ sampleSum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public Builder clearSampleSum() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ sampleSum_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List<io.prometheus.client.Metrics.Quantile> quantile_ =
+ java.util.Collections.emptyList();
+ private void ensureQuantileIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ quantile_ = new java.util.ArrayList<io.prometheus.client.Metrics.Quantile>(quantile_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder> quantileBuilder_;
+
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Quantile> getQuantileList() {
+ if (quantileBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(quantile_);
+ } else {
+ return quantileBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public int getQuantileCount() {
+ if (quantileBuilder_ == null) {
+ return quantile_.size();
+ } else {
+ return quantileBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Quantile getQuantile(int index) {
+ if (quantileBuilder_ == null) {
+ return quantile_.get(index);
+ } else {
+ return quantileBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder setQuantile(
+ int index, io.prometheus.client.Metrics.Quantile value) {
+ if (quantileBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureQuantileIsMutable();
+ quantile_.set(index, value);
+ onChanged();
+ } else {
+ quantileBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder setQuantile(
+ int index, io.prometheus.client.Metrics.Quantile.Builder builderForValue) {
+ if (quantileBuilder_ == null) {
+ ensureQuantileIsMutable();
+ quantile_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ quantileBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder addQuantile(io.prometheus.client.Metrics.Quantile value) {
+ if (quantileBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureQuantileIsMutable();
+ quantile_.add(value);
+ onChanged();
+ } else {
+ quantileBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder addQuantile(
+ int index, io.prometheus.client.Metrics.Quantile value) {
+ if (quantileBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureQuantileIsMutable();
+ quantile_.add(index, value);
+ onChanged();
+ } else {
+ quantileBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder addQuantile(
+ io.prometheus.client.Metrics.Quantile.Builder builderForValue) {
+ if (quantileBuilder_ == null) {
+ ensureQuantileIsMutable();
+ quantile_.add(builderForValue.build());
+ onChanged();
+ } else {
+ quantileBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder addQuantile(
+ int index, io.prometheus.client.Metrics.Quantile.Builder builderForValue) {
+ if (quantileBuilder_ == null) {
+ ensureQuantileIsMutable();
+ quantile_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ quantileBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder addAllQuantile(
+ java.lang.Iterable<? extends io.prometheus.client.Metrics.Quantile> values) {
+ if (quantileBuilder_ == null) {
+ ensureQuantileIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, quantile_);
+ onChanged();
+ } else {
+ quantileBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder clearQuantile() {
+ if (quantileBuilder_ == null) {
+ quantile_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ quantileBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public Builder removeQuantile(int index) {
+ if (quantileBuilder_ == null) {
+ ensureQuantileIsMutable();
+ quantile_.remove(index);
+ onChanged();
+ } else {
+ quantileBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Quantile.Builder getQuantileBuilder(
+ int index) {
+ return getQuantileFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder(
+ int index) {
+ if (quantileBuilder_ == null) {
+ return quantile_.get(index); } else {
+ return quantileBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.QuantileOrBuilder>
+ getQuantileOrBuilderList() {
+ if (quantileBuilder_ != null) {
+ return quantileBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(quantile_);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Quantile.Builder addQuantileBuilder() {
+ return getQuantileFieldBuilder().addBuilder(
+ io.prometheus.client.Metrics.Quantile.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Quantile.Builder addQuantileBuilder(
+ int index) {
+ return getQuantileFieldBuilder().addBuilder(
+ index, io.prometheus.client.Metrics.Quantile.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Quantile quantile = 3;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Quantile.Builder>
+ getQuantileBuilderList() {
+ return getQuantileFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder>
+ getQuantileFieldBuilder() {
+ if (quantileBuilder_ == null) {
+ quantileBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder>(
+ quantile_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ quantile_ = null;
+ }
+ return quantileBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Summary)
+ }
+
+ static {
+ defaultInstance = new Summary(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
+ }
+
+ public interface UntypedOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Untyped)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ boolean hasValue();
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ double getValue();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Untyped}
+ */
+ public static final class Untyped extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Untyped)
+ UntypedOrBuilder {
+ // Use Untyped.newBuilder() to construct.
+ private Untyped(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Untyped(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Untyped defaultInstance;
+ public static Untyped getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Untyped getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Untyped(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 9: {
+ bitField0_ |= 0x00000001;
+ value_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Untyped.class, io.prometheus.client.Metrics.Untyped.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Untyped> PARSER =
+ new com.google.protobuf.AbstractParser<Untyped>() {
+ public Untyped parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Untyped(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Untyped> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int VALUE_FIELD_NUMBER = 1;
+ private double value_;
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ value_ = 0D;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeDouble(1, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(1, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.Untyped parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Untyped parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Untyped prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Untyped}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Untyped)
+ io.prometheus.client.Metrics.UntypedOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Untyped.class, io.prometheus.client.Metrics.Untyped.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Untyped.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ value_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Untyped getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Untyped.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Untyped build() {
+ io.prometheus.client.Metrics.Untyped result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public io.prometheus.client.Metrics.Untyped buildPartial() {
+ io.prometheus.client.Metrics.Untyped result = new io.prometheus.client.Metrics.Untyped(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Untyped) {
+ return mergeFrom((io.prometheus.client.Metrics.Untyped)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Untyped other) {
+ if (other == io.prometheus.client.Metrics.Untyped.getDefaultInstance()) return this;
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Untyped parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Untyped) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private double value_ ;
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public double getValue() {
+ return value_;
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public Builder setValue(double value) {
+ bitField0_ |= 0x00000001;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double value = 1;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Untyped)
+ }
+
+ static {
+ defaultInstance = new Untyped(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
+ }
+
+ public interface HistogramOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Histogram)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ boolean hasSampleCount();
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ long getSampleCount();
+
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ boolean hasSampleSum();
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ double getSampleSum();
+
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ java.util.List<io.prometheus.client.Metrics.Bucket>
+ getBucketList();
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ io.prometheus.client.Metrics.Bucket getBucket(int index);
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ int getBucketCount();
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ java.util.List<? extends io.prometheus.client.Metrics.BucketOrBuilder>
+ getBucketOrBuilderList();
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Histogram}
+ */
+ public static final class Histogram extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Histogram)
+ HistogramOrBuilder {
+ // Use Histogram.newBuilder() to construct.
+ private Histogram(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Histogram(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Histogram defaultInstance;
+ public static Histogram getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Histogram getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Histogram(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ sampleCount_ = input.readUInt64();
+ break;
+ }
+ case 17: {
+ bitField0_ |= 0x00000002;
+ sampleSum_ = input.readDouble();
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ bucket_ = new java.util.ArrayList<io.prometheus.client.Metrics.Bucket>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ bucket_.add(input.readMessage(io.prometheus.client.Metrics.Bucket.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ bucket_ = java.util.Collections.unmodifiableList(bucket_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Histogram.class, io.prometheus.client.Metrics.Histogram.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Histogram> PARSER =
+ new com.google.protobuf.AbstractParser<Histogram>() {
+ public Histogram parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Histogram(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Histogram> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int SAMPLE_COUNT_FIELD_NUMBER = 1;
+ private long sampleCount_;
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public boolean hasSampleCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public long getSampleCount() {
+ return sampleCount_;
+ }
+
+ public static final int SAMPLE_SUM_FIELD_NUMBER = 2;
+ private double sampleSum_;
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public boolean hasSampleSum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public double getSampleSum() {
+ return sampleSum_;
+ }
+
+ public static final int BUCKET_FIELD_NUMBER = 3;
+ private java.util.List<io.prometheus.client.Metrics.Bucket> bucket_;
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Bucket> getBucketList() {
+ return bucket_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.BucketOrBuilder>
+ getBucketOrBuilderList() {
+ return bucket_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public int getBucketCount() {
+ return bucket_.size();
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.Bucket getBucket(int index) {
+ return bucket_.get(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder(
+ int index) {
+ return bucket_.get(index);
+ }
+
+ private void initFields() {
+ sampleCount_ = 0L;
+ sampleSum_ = 0D;
+ bucket_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, sampleCount_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeDouble(2, sampleSum_);
+ }
+ for (int i = 0; i < bucket_.size(); i++) {
+ output.writeMessage(3, bucket_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, sampleCount_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(2, sampleSum_);
+ }
+ for (int i = 0; i < bucket_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, bucket_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.Histogram parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Histogram parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Histogram prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Histogram}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Histogram)
+ io.prometheus.client.Metrics.HistogramOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Histogram.class, io.prometheus.client.Metrics.Histogram.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Histogram.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBucketFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ sampleCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ sampleSum_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (bucketBuilder_ == null) {
+ bucket_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ bucketBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Histogram getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Histogram.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Histogram build() {
+ io.prometheus.client.Metrics.Histogram result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public io.prometheus.client.Metrics.Histogram buildPartial() {
+ io.prometheus.client.Metrics.Histogram result = new io.prometheus.client.Metrics.Histogram(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.sampleCount_ = sampleCount_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.sampleSum_ = sampleSum_;
+ if (bucketBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ bucket_ = java.util.Collections.unmodifiableList(bucket_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.bucket_ = bucket_;
+ } else {
+ result.bucket_ = bucketBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Histogram) {
+ return mergeFrom((io.prometheus.client.Metrics.Histogram)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Histogram other) {
+ if (other == io.prometheus.client.Metrics.Histogram.getDefaultInstance()) return this;
+ if (other.hasSampleCount()) {
+ setSampleCount(other.getSampleCount());
+ }
+ if (other.hasSampleSum()) {
+ setSampleSum(other.getSampleSum());
+ }
+ if (bucketBuilder_ == null) {
+ if (!other.bucket_.isEmpty()) {
+ if (bucket_.isEmpty()) {
+ bucket_ = other.bucket_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureBucketIsMutable();
+ bucket_.addAll(other.bucket_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.bucket_.isEmpty()) {
+ if (bucketBuilder_.isEmpty()) {
+ bucketBuilder_.dispose();
+ bucketBuilder_ = null;
+ bucket_ = other.bucket_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ bucketBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getBucketFieldBuilder() : null;
+ } else {
+ bucketBuilder_.addAllMessages(other.bucket_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Histogram parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Histogram) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private long sampleCount_ ;
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public boolean hasSampleCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public long getSampleCount() {
+ return sampleCount_;
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public Builder setSampleCount(long value) {
+ bitField0_ |= 0x00000001;
+ sampleCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 sample_count = 1;</code>
+ */
+ public Builder clearSampleCount() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ sampleCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ private double sampleSum_ ;
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public boolean hasSampleSum() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public double getSampleSum() {
+ return sampleSum_;
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public Builder setSampleSum(double value) {
+ bitField0_ |= 0x00000002;
+ sampleSum_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double sample_sum = 2;</code>
+ */
+ public Builder clearSampleSum() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ sampleSum_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List<io.prometheus.client.Metrics.Bucket> bucket_ =
+ java.util.Collections.emptyList();
+ private void ensureBucketIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ bucket_ = new java.util.ArrayList<io.prometheus.client.Metrics.Bucket>(bucket_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder> bucketBuilder_;
+
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Bucket> getBucketList() {
+ if (bucketBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(bucket_);
+ } else {
+ return bucketBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public int getBucketCount() {
+ if (bucketBuilder_ == null) {
+ return bucket_.size();
+ } else {
+ return bucketBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.Bucket getBucket(int index) {
+ if (bucketBuilder_ == null) {
+ return bucket_.get(index);
+ } else {
+ return bucketBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder setBucket(
+ int index, io.prometheus.client.Metrics.Bucket value) {
+ if (bucketBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBucketIsMutable();
+ bucket_.set(index, value);
+ onChanged();
+ } else {
+ bucketBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder setBucket(
+ int index, io.prometheus.client.Metrics.Bucket.Builder builderForValue) {
+ if (bucketBuilder_ == null) {
+ ensureBucketIsMutable();
+ bucket_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ bucketBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder addBucket(io.prometheus.client.Metrics.Bucket value) {
+ if (bucketBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBucketIsMutable();
+ bucket_.add(value);
+ onChanged();
+ } else {
+ bucketBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder addBucket(
+ int index, io.prometheus.client.Metrics.Bucket value) {
+ if (bucketBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBucketIsMutable();
+ bucket_.add(index, value);
+ onChanged();
+ } else {
+ bucketBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder addBucket(
+ io.prometheus.client.Metrics.Bucket.Builder builderForValue) {
+ if (bucketBuilder_ == null) {
+ ensureBucketIsMutable();
+ bucket_.add(builderForValue.build());
+ onChanged();
+ } else {
+ bucketBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder addBucket(
+ int index, io.prometheus.client.Metrics.Bucket.Builder builderForValue) {
+ if (bucketBuilder_ == null) {
+ ensureBucketIsMutable();
+ bucket_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ bucketBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder addAllBucket(
+ java.lang.Iterable<? extends io.prometheus.client.Metrics.Bucket> values) {
+ if (bucketBuilder_ == null) {
+ ensureBucketIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, bucket_);
+ onChanged();
+ } else {
+ bucketBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder clearBucket() {
+ if (bucketBuilder_ == null) {
+ bucket_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ bucketBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public Builder removeBucket(int index) {
+ if (bucketBuilder_ == null) {
+ ensureBucketIsMutable();
+ bucket_.remove(index);
+ onChanged();
+ } else {
+ bucketBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.Bucket.Builder getBucketBuilder(
+ int index) {
+ return getBucketFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder(
+ int index) {
+ if (bucketBuilder_ == null) {
+ return bucket_.get(index); } else {
+ return bucketBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.BucketOrBuilder>
+ getBucketOrBuilderList() {
+ if (bucketBuilder_ != null) {
+ return bucketBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(bucket_);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.Bucket.Builder addBucketBuilder() {
+ return getBucketFieldBuilder().addBuilder(
+ io.prometheus.client.Metrics.Bucket.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public io.prometheus.client.Metrics.Bucket.Builder addBucketBuilder(
+ int index) {
+ return getBucketFieldBuilder().addBuilder(
+ index, io.prometheus.client.Metrics.Bucket.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Bucket bucket = 3;</code>
+ *
+ * <pre>
+ * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ * </pre>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Bucket.Builder>
+ getBucketBuilderList() {
+ return getBucketFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder>
+ getBucketFieldBuilder() {
+ if (bucketBuilder_ == null) {
+ bucketBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder>(
+ bucket_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ bucket_ = null;
+ }
+ return bucketBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Histogram)
+ }
+
+ static {
+ defaultInstance = new Histogram(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
+ }
+
+ public interface BucketOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Bucket)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ boolean hasCumulativeCount();
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ long getCumulativeCount();
+
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ boolean hasUpperBound();
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ double getUpperBound();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Bucket}
+ */
+ public static final class Bucket extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Bucket)
+ BucketOrBuilder {
+ // Use Bucket.newBuilder() to construct.
+ private Bucket(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Bucket(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Bucket defaultInstance;
+ public static Bucket getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Bucket getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Bucket(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ cumulativeCount_ = input.readUInt64();
+ break;
+ }
+ case 17: {
+ bitField0_ |= 0x00000002;
+ upperBound_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Bucket.class, io.prometheus.client.Metrics.Bucket.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Bucket> PARSER =
+ new com.google.protobuf.AbstractParser<Bucket>() {
+ public Bucket parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Bucket(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Bucket> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int CUMULATIVE_COUNT_FIELD_NUMBER = 1;
+ private long cumulativeCount_;
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ public boolean hasCumulativeCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ public long getCumulativeCount() {
+ return cumulativeCount_;
+ }
+
+ public static final int UPPER_BOUND_FIELD_NUMBER = 2;
+ private double upperBound_;
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ public boolean hasUpperBound() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ public double getUpperBound() {
+ return upperBound_;
+ }
+
+ private void initFields() {
+ cumulativeCount_ = 0L;
+ upperBound_ = 0D;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, cumulativeCount_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeDouble(2, upperBound_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, cumulativeCount_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(2, upperBound_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.Bucket parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Bucket parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Bucket prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Bucket}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Bucket)
+ io.prometheus.client.Metrics.BucketOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Bucket.class, io.prometheus.client.Metrics.Bucket.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Bucket.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ cumulativeCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ upperBound_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Bucket getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Bucket.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Bucket build() {
+ io.prometheus.client.Metrics.Bucket result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public io.prometheus.client.Metrics.Bucket buildPartial() {
+ io.prometheus.client.Metrics.Bucket result = new io.prometheus.client.Metrics.Bucket(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.cumulativeCount_ = cumulativeCount_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.upperBound_ = upperBound_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Bucket) {
+ return mergeFrom((io.prometheus.client.Metrics.Bucket)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Bucket other) {
+ if (other == io.prometheus.client.Metrics.Bucket.getDefaultInstance()) return this;
+ if (other.hasCumulativeCount()) {
+ setCumulativeCount(other.getCumulativeCount());
+ }
+ if (other.hasUpperBound()) {
+ setUpperBound(other.getUpperBound());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Bucket parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Bucket) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private long cumulativeCount_ ;
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ public boolean hasCumulativeCount() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ public long getCumulativeCount() {
+ return cumulativeCount_;
+ }
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ public Builder setCumulativeCount(long value) {
+ bitField0_ |= 0x00000001;
+ cumulativeCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 cumulative_count = 1;</code>
+ *
+ * <pre>
+ * Cumulative in increasing order.
+ * </pre>
+ */
+ public Builder clearCumulativeCount() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ cumulativeCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ private double upperBound_ ;
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ public boolean hasUpperBound() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ public double getUpperBound() {
+ return upperBound_;
+ }
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ public Builder setUpperBound(double value) {
+ bitField0_ |= 0x00000002;
+ upperBound_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional double upper_bound = 2;</code>
+ *
+ * <pre>
+ * Inclusive.
+ * </pre>
+ */
+ public Builder clearUpperBound() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ upperBound_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Bucket)
+ }
+
+ static {
+ defaultInstance = new Bucket(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
+ }
+
+ public interface MetricOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.Metric)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ java.util.List<io.prometheus.client.Metrics.LabelPair>
+ getLabelList();
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ io.prometheus.client.Metrics.LabelPair getLabel(int index);
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ int getLabelCount();
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ java.util.List<? extends io.prometheus.client.Metrics.LabelPairOrBuilder>
+ getLabelOrBuilderList();
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder(
+ int index);
+
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ boolean hasGauge();
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ io.prometheus.client.Metrics.Gauge getGauge();
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder();
+
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ boolean hasCounter();
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ io.prometheus.client.Metrics.Counter getCounter();
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder();
+
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ boolean hasSummary();
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ io.prometheus.client.Metrics.Summary getSummary();
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder();
+
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ boolean hasUntyped();
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ io.prometheus.client.Metrics.Untyped getUntyped();
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder();
+
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ boolean hasHistogram();
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ io.prometheus.client.Metrics.Histogram getHistogram();
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder();
+
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ boolean hasTimestampMs();
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ long getTimestampMs();
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Metric}
+ */
+ public static final class Metric extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.Metric)
+ MetricOrBuilder {
+ // Use Metric.newBuilder() to construct.
+ private Metric(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Metric(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Metric defaultInstance;
+ public static Metric getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Metric getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Metric(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ label_ = new java.util.ArrayList<io.prometheus.client.Metrics.LabelPair>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ label_.add(input.readMessage(io.prometheus.client.Metrics.LabelPair.PARSER, extensionRegistry));
+ break;
+ }
+ case 18: {
+ io.prometheus.client.Metrics.Gauge.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = gauge_.toBuilder();
+ }
+ gauge_ = input.readMessage(io.prometheus.client.Metrics.Gauge.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(gauge_);
+ gauge_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 26: {
+ io.prometheus.client.Metrics.Counter.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = counter_.toBuilder();
+ }
+ counter_ = input.readMessage(io.prometheus.client.Metrics.Counter.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(counter_);
+ counter_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 34: {
+ io.prometheus.client.Metrics.Summary.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = summary_.toBuilder();
+ }
+ summary_ = input.readMessage(io.prometheus.client.Metrics.Summary.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(summary_);
+ summary_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 42: {
+ io.prometheus.client.Metrics.Untyped.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = untyped_.toBuilder();
+ }
+ untyped_ = input.readMessage(io.prometheus.client.Metrics.Untyped.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(untyped_);
+ untyped_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ timestampMs_ = input.readInt64();
+ break;
+ }
+ case 58: {
+ io.prometheus.client.Metrics.Histogram.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ subBuilder = histogram_.toBuilder();
+ }
+ histogram_ = input.readMessage(io.prometheus.client.Metrics.Histogram.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(histogram_);
+ histogram_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000010;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ label_ = java.util.Collections.unmodifiableList(label_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Metric.class, io.prometheus.client.Metrics.Metric.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Metric> PARSER =
+ new com.google.protobuf.AbstractParser<Metric>() {
+ public Metric parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Metric(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Metric> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int LABEL_FIELD_NUMBER = 1;
+ private java.util.List<io.prometheus.client.Metrics.LabelPair> label_;
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.LabelPair> getLabelList() {
+ return label_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.LabelPairOrBuilder>
+ getLabelOrBuilderList() {
+ return label_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public int getLabelCount() {
+ return label_.size();
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPair getLabel(int index) {
+ return label_.get(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder(
+ int index) {
+ return label_.get(index);
+ }
+
+ public static final int GAUGE_FIELD_NUMBER = 2;
+ private io.prometheus.client.Metrics.Gauge gauge_;
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public boolean hasGauge() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public io.prometheus.client.Metrics.Gauge getGauge() {
+ return gauge_;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder() {
+ return gauge_;
+ }
+
+ public static final int COUNTER_FIELD_NUMBER = 3;
+ private io.prometheus.client.Metrics.Counter counter_;
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public boolean hasCounter() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Counter getCounter() {
+ return counter_;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder() {
+ return counter_;
+ }
+
+ public static final int SUMMARY_FIELD_NUMBER = 4;
+ private io.prometheus.client.Metrics.Summary summary_;
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public boolean hasSummary() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Summary getSummary() {
+ return summary_;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder() {
+ return summary_;
+ }
+
+ public static final int UNTYPED_FIELD_NUMBER = 5;
+ private io.prometheus.client.Metrics.Untyped untyped_;
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public boolean hasUntyped() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public io.prometheus.client.Metrics.Untyped getUntyped() {
+ return untyped_;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder() {
+ return untyped_;
+ }
+
+ public static final int HISTOGRAM_FIELD_NUMBER = 7;
+ private io.prometheus.client.Metrics.Histogram histogram_;
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public boolean hasHistogram() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public io.prometheus.client.Metrics.Histogram getHistogram() {
+ return histogram_;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder() {
+ return histogram_;
+ }
+
+ public static final int TIMESTAMP_MS_FIELD_NUMBER = 6;
+ private long timestampMs_;
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ public boolean hasTimestampMs() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ public long getTimestampMs() {
+ return timestampMs_;
+ }
+
+ private void initFields() {
+ label_ = java.util.Collections.emptyList();
+ gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
+ counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
+ summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
+ untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
+ histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
+ timestampMs_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < label_.size(); i++) {
+ output.writeMessage(1, label_.get(i));
+ }
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(2, gauge_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(3, counter_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(4, summary_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(5, untyped_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt64(6, timestampMs_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeMessage(7, histogram_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < label_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, label_.get(i));
+ }
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, gauge_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, counter_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, summary_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(5, untyped_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(6, timestampMs_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(7, histogram_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.Metric parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Metric parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Metric parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.Metric parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.Metric prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.Metric}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.Metric)
+ io.prometheus.client.Metrics.MetricOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.Metric.class, io.prometheus.client.Metrics.Metric.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.Metric.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getLabelFieldBuilder();
+ getGaugeFieldBuilder();
+ getCounterFieldBuilder();
+ getSummaryFieldBuilder();
+ getUntypedFieldBuilder();
+ getHistogramFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (labelBuilder_ == null) {
+ label_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ labelBuilder_.clear();
+ }
+ if (gaugeBuilder_ == null) {
+ gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
+ } else {
+ gaugeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (counterBuilder_ == null) {
+ counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
+ } else {
+ counterBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (summaryBuilder_ == null) {
+ summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
+ } else {
+ summaryBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ if (untypedBuilder_ == null) {
+ untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
+ } else {
+ untypedBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000010);
+ if (histogramBuilder_ == null) {
+ histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
+ } else {
+ histogramBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000020);
+ timestampMs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.Metric getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.Metric.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.Metric build() {
+ io.prometheus.client.Metrics.Metric result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public io.prometheus.client.Metrics.Metric buildPartial() {
+ io.prometheus.client.Metrics.Metric result = new io.prometheus.client.Metrics.Metric(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (labelBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ label_ = java.util.Collections.unmodifiableList(label_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.label_ = label_;
+ } else {
+ result.label_ = labelBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (gaugeBuilder_ == null) {
+ result.gauge_ = gauge_;
+ } else {
+ result.gauge_ = gaugeBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (counterBuilder_ == null) {
+ result.counter_ = counter_;
+ } else {
+ result.counter_ = counterBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (summaryBuilder_ == null) {
+ result.summary_ = summary_;
+ } else {
+ result.summary_ = summaryBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ if (untypedBuilder_ == null) {
+ result.untyped_ = untyped_;
+ } else {
+ result.untyped_ = untypedBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ if (histogramBuilder_ == null) {
+ result.histogram_ = histogram_;
+ } else {
+ result.histogram_ = histogramBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.timestampMs_ = timestampMs_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.Metric) {
+ return mergeFrom((io.prometheus.client.Metrics.Metric)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.Metric other) {
+ if (other == io.prometheus.client.Metrics.Metric.getDefaultInstance()) return this;
+ if (labelBuilder_ == null) {
+ if (!other.label_.isEmpty()) {
+ if (label_.isEmpty()) {
+ label_ = other.label_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureLabelIsMutable();
+ label_.addAll(other.label_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.label_.isEmpty()) {
+ if (labelBuilder_.isEmpty()) {
+ labelBuilder_.dispose();
+ labelBuilder_ = null;
+ label_ = other.label_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ labelBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getLabelFieldBuilder() : null;
+ } else {
+ labelBuilder_.addAllMessages(other.label_);
+ }
+ }
+ }
+ if (other.hasGauge()) {
+ mergeGauge(other.getGauge());
+ }
+ if (other.hasCounter()) {
+ mergeCounter(other.getCounter());
+ }
+ if (other.hasSummary()) {
+ mergeSummary(other.getSummary());
+ }
+ if (other.hasUntyped()) {
+ mergeUntyped(other.getUntyped());
+ }
+ if (other.hasHistogram()) {
+ mergeHistogram(other.getHistogram());
+ }
+ if (other.hasTimestampMs()) {
+ setTimestampMs(other.getTimestampMs());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.Metric parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.Metric) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private java.util.List<io.prometheus.client.Metrics.LabelPair> label_ =
+ java.util.Collections.emptyList();
+ private void ensureLabelIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ label_ = new java.util.ArrayList<io.prometheus.client.Metrics.LabelPair>(label_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder> labelBuilder_;
+
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.LabelPair> getLabelList() {
+ if (labelBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(label_);
+ } else {
+ return labelBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public int getLabelCount() {
+ if (labelBuilder_ == null) {
+ return label_.size();
+ } else {
+ return labelBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPair getLabel(int index) {
+ if (labelBuilder_ == null) {
+ return label_.get(index);
+ } else {
+ return labelBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder setLabel(
+ int index, io.prometheus.client.Metrics.LabelPair value) {
+ if (labelBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLabelIsMutable();
+ label_.set(index, value);
+ onChanged();
+ } else {
+ labelBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder setLabel(
+ int index, io.prometheus.client.Metrics.LabelPair.Builder builderForValue) {
+ if (labelBuilder_ == null) {
+ ensureLabelIsMutable();
+ label_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ labelBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder addLabel(io.prometheus.client.Metrics.LabelPair value) {
+ if (labelBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLabelIsMutable();
+ label_.add(value);
+ onChanged();
+ } else {
+ labelBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder addLabel(
+ int index, io.prometheus.client.Metrics.LabelPair value) {
+ if (labelBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLabelIsMutable();
+ label_.add(index, value);
+ onChanged();
+ } else {
+ labelBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder addLabel(
+ io.prometheus.client.Metrics.LabelPair.Builder builderForValue) {
+ if (labelBuilder_ == null) {
+ ensureLabelIsMutable();
+ label_.add(builderForValue.build());
+ onChanged();
+ } else {
+ labelBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder addLabel(
+ int index, io.prometheus.client.Metrics.LabelPair.Builder builderForValue) {
+ if (labelBuilder_ == null) {
+ ensureLabelIsMutable();
+ label_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ labelBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder addAllLabel(
+ java.lang.Iterable<? extends io.prometheus.client.Metrics.LabelPair> values) {
+ if (labelBuilder_ == null) {
+ ensureLabelIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, label_);
+ onChanged();
+ } else {
+ labelBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder clearLabel() {
+ if (labelBuilder_ == null) {
+ label_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ labelBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public Builder removeLabel(int index) {
+ if (labelBuilder_ == null) {
+ ensureLabelIsMutable();
+ label_.remove(index);
+ onChanged();
+ } else {
+ labelBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPair.Builder getLabelBuilder(
+ int index) {
+ return getLabelFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder(
+ int index) {
+ if (labelBuilder_ == null) {
+ return label_.get(index); } else {
+ return labelBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.LabelPairOrBuilder>
+ getLabelOrBuilderList() {
+ if (labelBuilder_ != null) {
+ return labelBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(label_);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPair.Builder addLabelBuilder() {
+ return getLabelFieldBuilder().addBuilder(
+ io.prometheus.client.Metrics.LabelPair.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public io.prometheus.client.Metrics.LabelPair.Builder addLabelBuilder(
+ int index) {
+ return getLabelFieldBuilder().addBuilder(
+ index, io.prometheus.client.Metrics.LabelPair.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.LabelPair label = 1;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.LabelPair.Builder>
+ getLabelBuilderList() {
+ return getLabelFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder>
+ getLabelFieldBuilder() {
+ if (labelBuilder_ == null) {
+ labelBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder>(
+ label_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ label_ = null;
+ }
+ return labelBuilder_;
+ }
+
+ private io.prometheus.client.Metrics.Gauge gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder> gaugeBuilder_;
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public boolean hasGauge() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public io.prometheus.client.Metrics.Gauge getGauge() {
+ if (gaugeBuilder_ == null) {
+ return gauge_;
+ } else {
+ return gaugeBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public Builder setGauge(io.prometheus.client.Metrics.Gauge value) {
+ if (gaugeBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ gauge_ = value;
+ onChanged();
+ } else {
+ gaugeBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public Builder setGauge(
+ io.prometheus.client.Metrics.Gauge.Builder builderForValue) {
+ if (gaugeBuilder_ == null) {
+ gauge_ = builderForValue.build();
+ onChanged();
+ } else {
+ gaugeBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public Builder mergeGauge(io.prometheus.client.Metrics.Gauge value) {
+ if (gaugeBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ gauge_ != io.prometheus.client.Metrics.Gauge.getDefaultInstance()) {
+ gauge_ =
+ io.prometheus.client.Metrics.Gauge.newBuilder(gauge_).mergeFrom(value).buildPartial();
+ } else {
+ gauge_ = value;
+ }
+ onChanged();
+ } else {
+ gaugeBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public Builder clearGauge() {
+ if (gaugeBuilder_ == null) {
+ gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance();
+ onChanged();
+ } else {
+ gaugeBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public io.prometheus.client.Metrics.Gauge.Builder getGaugeBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getGaugeFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ public io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder() {
+ if (gaugeBuilder_ != null) {
+ return gaugeBuilder_.getMessageOrBuilder();
+ } else {
+ return gauge_;
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Gauge gauge = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder>
+ getGaugeFieldBuilder() {
+ if (gaugeBuilder_ == null) {
+ gaugeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder>(
+ getGauge(),
+ getParentForChildren(),
+ isClean());
+ gauge_ = null;
+ }
+ return gaugeBuilder_;
+ }
+
+ private io.prometheus.client.Metrics.Counter counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder> counterBuilder_;
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public boolean hasCounter() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Counter getCounter() {
+ if (counterBuilder_ == null) {
+ return counter_;
+ } else {
+ return counterBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public Builder setCounter(io.prometheus.client.Metrics.Counter value) {
+ if (counterBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ counter_ = value;
+ onChanged();
+ } else {
+ counterBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public Builder setCounter(
+ io.prometheus.client.Metrics.Counter.Builder builderForValue) {
+ if (counterBuilder_ == null) {
+ counter_ = builderForValue.build();
+ onChanged();
+ } else {
+ counterBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public Builder mergeCounter(io.prometheus.client.Metrics.Counter value) {
+ if (counterBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ counter_ != io.prometheus.client.Metrics.Counter.getDefaultInstance()) {
+ counter_ =
+ io.prometheus.client.Metrics.Counter.newBuilder(counter_).mergeFrom(value).buildPartial();
+ } else {
+ counter_ = value;
+ }
+ onChanged();
+ } else {
+ counterBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public Builder clearCounter() {
+ if (counterBuilder_ == null) {
+ counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance();
+ onChanged();
+ } else {
+ counterBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public io.prometheus.client.Metrics.Counter.Builder getCounterBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getCounterFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ public io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder() {
+ if (counterBuilder_ != null) {
+ return counterBuilder_.getMessageOrBuilder();
+ } else {
+ return counter_;
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Counter counter = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder>
+ getCounterFieldBuilder() {
+ if (counterBuilder_ == null) {
+ counterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder>(
+ getCounter(),
+ getParentForChildren(),
+ isClean());
+ counter_ = null;
+ }
+ return counterBuilder_;
+ }
+
+ private io.prometheus.client.Metrics.Summary summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder> summaryBuilder_;
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public boolean hasSummary() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Summary getSummary() {
+ if (summaryBuilder_ == null) {
+ return summary_;
+ } else {
+ return summaryBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public Builder setSummary(io.prometheus.client.Metrics.Summary value) {
+ if (summaryBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ summary_ = value;
+ onChanged();
+ } else {
+ summaryBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public Builder setSummary(
+ io.prometheus.client.Metrics.Summary.Builder builderForValue) {
+ if (summaryBuilder_ == null) {
+ summary_ = builderForValue.build();
+ onChanged();
+ } else {
+ summaryBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public Builder mergeSummary(io.prometheus.client.Metrics.Summary value) {
+ if (summaryBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ summary_ != io.prometheus.client.Metrics.Summary.getDefaultInstance()) {
+ summary_ =
+ io.prometheus.client.Metrics.Summary.newBuilder(summary_).mergeFrom(value).buildPartial();
+ } else {
+ summary_ = value;
+ }
+ onChanged();
+ } else {
+ summaryBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public Builder clearSummary() {
+ if (summaryBuilder_ == null) {
+ summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance();
+ onChanged();
+ } else {
+ summaryBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Summary.Builder getSummaryBuilder() {
+ bitField0_ |= 0x00000008;
+ onChanged();
+ return getSummaryFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ public io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder() {
+ if (summaryBuilder_ != null) {
+ return summaryBuilder_.getMessageOrBuilder();
+ } else {
+ return summary_;
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Summary summary = 4;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder>
+ getSummaryFieldBuilder() {
+ if (summaryBuilder_ == null) {
+ summaryBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder>(
+ getSummary(),
+ getParentForChildren(),
+ isClean());
+ summary_ = null;
+ }
+ return summaryBuilder_;
+ }
+
+ private io.prometheus.client.Metrics.Untyped untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder> untypedBuilder_;
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public boolean hasUntyped() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public io.prometheus.client.Metrics.Untyped getUntyped() {
+ if (untypedBuilder_ == null) {
+ return untyped_;
+ } else {
+ return untypedBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public Builder setUntyped(io.prometheus.client.Metrics.Untyped value) {
+ if (untypedBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ untyped_ = value;
+ onChanged();
+ } else {
+ untypedBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000010;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public Builder setUntyped(
+ io.prometheus.client.Metrics.Untyped.Builder builderForValue) {
+ if (untypedBuilder_ == null) {
+ untyped_ = builderForValue.build();
+ onChanged();
+ } else {
+ untypedBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000010;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public Builder mergeUntyped(io.prometheus.client.Metrics.Untyped value) {
+ if (untypedBuilder_ == null) {
+ if (((bitField0_ & 0x00000010) == 0x00000010) &&
+ untyped_ != io.prometheus.client.Metrics.Untyped.getDefaultInstance()) {
+ untyped_ =
+ io.prometheus.client.Metrics.Untyped.newBuilder(untyped_).mergeFrom(value).buildPartial();
+ } else {
+ untyped_ = value;
+ }
+ onChanged();
+ } else {
+ untypedBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000010;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public Builder clearUntyped() {
+ if (untypedBuilder_ == null) {
+ untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance();
+ onChanged();
+ } else {
+ untypedBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000010);
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public io.prometheus.client.Metrics.Untyped.Builder getUntypedBuilder() {
+ bitField0_ |= 0x00000010;
+ onChanged();
+ return getUntypedFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ public io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder() {
+ if (untypedBuilder_ != null) {
+ return untypedBuilder_.getMessageOrBuilder();
+ } else {
+ return untyped_;
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Untyped untyped = 5;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder>
+ getUntypedFieldBuilder() {
+ if (untypedBuilder_ == null) {
+ untypedBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder>(
+ getUntyped(),
+ getParentForChildren(),
+ isClean());
+ untyped_ = null;
+ }
+ return untypedBuilder_;
+ }
+
+ private io.prometheus.client.Metrics.Histogram histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder> histogramBuilder_;
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public boolean hasHistogram() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public io.prometheus.client.Metrics.Histogram getHistogram() {
+ if (histogramBuilder_ == null) {
+ return histogram_;
+ } else {
+ return histogramBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public Builder setHistogram(io.prometheus.client.Metrics.Histogram value) {
+ if (histogramBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ histogram_ = value;
+ onChanged();
+ } else {
+ histogramBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000020;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public Builder setHistogram(
+ io.prometheus.client.Metrics.Histogram.Builder builderForValue) {
+ if (histogramBuilder_ == null) {
+ histogram_ = builderForValue.build();
+ onChanged();
+ } else {
+ histogramBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000020;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public Builder mergeHistogram(io.prometheus.client.Metrics.Histogram value) {
+ if (histogramBuilder_ == null) {
+ if (((bitField0_ & 0x00000020) == 0x00000020) &&
+ histogram_ != io.prometheus.client.Metrics.Histogram.getDefaultInstance()) {
+ histogram_ =
+ io.prometheus.client.Metrics.Histogram.newBuilder(histogram_).mergeFrom(value).buildPartial();
+ } else {
+ histogram_ = value;
+ }
+ onChanged();
+ } else {
+ histogramBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000020;
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public Builder clearHistogram() {
+ if (histogramBuilder_ == null) {
+ histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance();
+ onChanged();
+ } else {
+ histogramBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000020);
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public io.prometheus.client.Metrics.Histogram.Builder getHistogramBuilder() {
+ bitField0_ |= 0x00000020;
+ onChanged();
+ return getHistogramFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ public io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder() {
+ if (histogramBuilder_ != null) {
+ return histogramBuilder_.getMessageOrBuilder();
+ } else {
+ return histogram_;
+ }
+ }
+ /**
+ * <code>optional .io.prometheus.client.Histogram histogram = 7;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder>
+ getHistogramFieldBuilder() {
+ if (histogramBuilder_ == null) {
+ histogramBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder>(
+ getHistogram(),
+ getParentForChildren(),
+ isClean());
+ histogram_ = null;
+ }
+ return histogramBuilder_;
+ }
+
+ private long timestampMs_ ;
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ public boolean hasTimestampMs() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ public long getTimestampMs() {
+ return timestampMs_;
+ }
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ public Builder setTimestampMs(long value) {
+ bitField0_ |= 0x00000040;
+ timestampMs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 timestamp_ms = 6;</code>
+ */
+ public Builder clearTimestampMs() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ timestampMs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.Metric)
+ }
+
+ static {
+ defaultInstance = new Metric(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
+ }
+
+ public interface MetricFamilyOrBuilder extends
+ // @@protoc_insertion_point(interface_extends:io.prometheus.client.MetricFamily)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ boolean hasName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ java.lang.String getName();
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ boolean hasHelp();
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ java.lang.String getHelp();
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getHelpBytes();
+
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ boolean hasType();
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ io.prometheus.client.Metrics.MetricType getType();
+
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ java.util.List<io.prometheus.client.Metrics.Metric>
+ getMetricList();
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ io.prometheus.client.Metrics.Metric getMetric(int index);
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ int getMetricCount();
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ java.util.List<? extends io.prometheus.client.Metrics.MetricOrBuilder>
+ getMetricOrBuilderList();
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.MetricFamily}
+ */
+ public static final class MetricFamily extends
+ com.google.protobuf.GeneratedMessage implements
+ // @@protoc_insertion_point(message_implements:io.prometheus.client.MetricFamily)
+ MetricFamilyOrBuilder {
+ // Use MetricFamily.newBuilder() to construct.
+ private MetricFamily(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MetricFamily(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MetricFamily defaultInstance;
+ public static MetricFamily getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MetricFamily getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MetricFamily(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ com.google.protobuf.ByteString bs = input.readBytes();
+ bitField0_ |= 0x00000001;
+ name_ = bs;
+ break;
+ }
+ case 18: {
+ com.google.protobuf.ByteString bs = input.readBytes();
+ bitField0_ |= 0x00000002;
+ help_ = bs;
+ break;
+ }
+ case 24: {
+ int rawValue = input.readEnum();
+ io.prometheus.client.Metrics.MetricType value = io.prometheus.client.Metrics.MetricType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(3, rawValue);
+ } else {
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ }
+ break;
+ }
+ case 34: {
+ if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ metric_ = new java.util.ArrayList<io.prometheus.client.Metrics.Metric>();
+ mutable_bitField0_ |= 0x00000008;
+ }
+ metric_.add(input.readMessage(io.prometheus.client.Metrics.Metric.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ metric_ = java.util.Collections.unmodifiableList(metric_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.MetricFamily.class, io.prometheus.client.Metrics.MetricFamily.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MetricFamily> PARSER =
+ new com.google.protobuf.AbstractParser<MetricFamily>() {
+ public MetricFamily parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MetricFamily(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MetricFamily> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int HELP_FIELD_NUMBER = 2;
+ private java.lang.Object help_;
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public boolean hasHelp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public java.lang.String getHelp() {
+ java.lang.Object ref = help_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ help_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getHelpBytes() {
+ java.lang.Object ref = help_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ help_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int TYPE_FIELD_NUMBER = 3;
+ private io.prometheus.client.Metrics.MetricType type_;
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ public io.prometheus.client.Metrics.MetricType getType() {
+ return type_;
+ }
+
+ public static final int METRIC_FIELD_NUMBER = 4;
+ private java.util.List<io.prometheus.client.Metrics.Metric> metric_;
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Metric> getMetricList() {
+ return metric_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.MetricOrBuilder>
+ getMetricOrBuilderList() {
+ return metric_;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public int getMetricCount() {
+ return metric_.size();
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Metric getMetric(int index) {
+ return metric_.get(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder(
+ int index) {
+ return metric_.get(index);
+ }
+
+ private void initFields() {
+ name_ = "";
+ help_ = "";
+ type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
+ metric_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getHelpBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeEnum(3, type_.getNumber());
+ }
+ for (int i = 0; i < metric_.size(); i++) {
+ output.writeMessage(4, metric_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getHelpBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(3, type_.getNumber());
+ }
+ for (int i = 0; i < metric_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, metric_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static io.prometheus.client.Metrics.MetricFamily parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(io.prometheus.client.Metrics.MetricFamily prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code io.prometheus.client.MetricFamily}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> implements
+ // @@protoc_insertion_point(builder_implements:io.prometheus.client.MetricFamily)
+ io.prometheus.client.Metrics.MetricFamilyOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ io.prometheus.client.Metrics.MetricFamily.class, io.prometheus.client.Metrics.MetricFamily.Builder.class);
+ }
+
+ // Construct using io.prometheus.client.Metrics.MetricFamily.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getMetricFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ help_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (metricBuilder_ == null) {
+ metric_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ metricBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor;
+ }
+
+ public io.prometheus.client.Metrics.MetricFamily getDefaultInstanceForType() {
+ return io.prometheus.client.Metrics.MetricFamily.getDefaultInstance();
+ }
+
+ public io.prometheus.client.Metrics.MetricFamily build() {
+ io.prometheus.client.Metrics.MetricFamily result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public io.prometheus.client.Metrics.MetricFamily buildPartial() {
+ io.prometheus.client.Metrics.MetricFamily result = new io.prometheus.client.Metrics.MetricFamily(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.help_ = help_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.type_ = type_;
+ if (metricBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ metric_ = java.util.Collections.unmodifiableList(metric_);
+ bitField0_ = (bitField0_ & ~0x00000008);
+ }
+ result.metric_ = metric_;
+ } else {
+ result.metric_ = metricBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof io.prometheus.client.Metrics.MetricFamily) {
+ return mergeFrom((io.prometheus.client.Metrics.MetricFamily)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(io.prometheus.client.Metrics.MetricFamily other) {
+ if (other == io.prometheus.client.Metrics.MetricFamily.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ bitField0_ |= 0x00000001;
+ name_ = other.name_;
+ onChanged();
+ }
+ if (other.hasHelp()) {
+ bitField0_ |= 0x00000002;
+ help_ = other.help_;
+ onChanged();
+ }
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ if (metricBuilder_ == null) {
+ if (!other.metric_.isEmpty()) {
+ if (metric_.isEmpty()) {
+ metric_ = other.metric_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ ensureMetricIsMutable();
+ metric_.addAll(other.metric_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.metric_.isEmpty()) {
+ if (metricBuilder_.isEmpty()) {
+ metricBuilder_.dispose();
+ metricBuilder_ = null;
+ metric_ = other.metric_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ metricBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getMetricFieldBuilder() : null;
+ } else {
+ metricBuilder_.addAllMessages(other.metric_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ io.prometheus.client.Metrics.MetricFamily parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (io.prometheus.client.Metrics.MetricFamily) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ private java.lang.Object name_ = "";
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string name = 1;</code>
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object help_ = "";
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public boolean hasHelp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public java.lang.String getHelp() {
+ java.lang.Object ref = help_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ help_ = s;
+ }
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getHelpBytes() {
+ java.lang.Object ref = help_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ help_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public Builder setHelp(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ help_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public Builder clearHelp() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ help_ = getDefaultInstance().getHelp();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string help = 2;</code>
+ */
+ public Builder setHelpBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ help_ = value;
+ onChanged();
+ return this;
+ }
+
+ private io.prometheus.client.Metrics.MetricType type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ public io.prometheus.client.Metrics.MetricType getType() {
+ return type_;
+ }
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ public Builder setType(io.prometheus.client.Metrics.MetricType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional .io.prometheus.client.MetricType type = 3;</code>
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ type_ = io.prometheus.client.Metrics.MetricType.COUNTER;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List<io.prometheus.client.Metrics.Metric> metric_ =
+ java.util.Collections.emptyList();
+ private void ensureMetricIsMutable() {
+ if (!((bitField0_ & 0x00000008) == 0x00000008)) {
+ metric_ = new java.util.ArrayList<io.prometheus.client.Metrics.Metric>(metric_);
+ bitField0_ |= 0x00000008;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder> metricBuilder_;
+
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Metric> getMetricList() {
+ if (metricBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(metric_);
+ } else {
+ return metricBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public int getMetricCount() {
+ if (metricBuilder_ == null) {
+ return metric_.size();
+ } else {
+ return metricBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Metric getMetric(int index) {
+ if (metricBuilder_ == null) {
+ return metric_.get(index);
+ } else {
+ return metricBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder setMetric(
+ int index, io.prometheus.client.Metrics.Metric value) {
+ if (metricBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricIsMutable();
+ metric_.set(index, value);
+ onChanged();
+ } else {
+ metricBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder setMetric(
+ int index, io.prometheus.client.Metrics.Metric.Builder builderForValue) {
+ if (metricBuilder_ == null) {
+ ensureMetricIsMutable();
+ metric_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ metricBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder addMetric(io.prometheus.client.Metrics.Metric value) {
+ if (metricBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricIsMutable();
+ metric_.add(value);
+ onChanged();
+ } else {
+ metricBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder addMetric(
+ int index, io.prometheus.client.Metrics.Metric value) {
+ if (metricBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricIsMutable();
+ metric_.add(index, value);
+ onChanged();
+ } else {
+ metricBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder addMetric(
+ io.prometheus.client.Metrics.Metric.Builder builderForValue) {
+ if (metricBuilder_ == null) {
+ ensureMetricIsMutable();
+ metric_.add(builderForValue.build());
+ onChanged();
+ } else {
+ metricBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder addMetric(
+ int index, io.prometheus.client.Metrics.Metric.Builder builderForValue) {
+ if (metricBuilder_ == null) {
+ ensureMetricIsMutable();
+ metric_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ metricBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder addAllMetric(
+ java.lang.Iterable<? extends io.prometheus.client.Metrics.Metric> values) {
+ if (metricBuilder_ == null) {
+ ensureMetricIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(
+ values, metric_);
+ onChanged();
+ } else {
+ metricBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder clearMetric() {
+ if (metricBuilder_ == null) {
+ metric_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ onChanged();
+ } else {
+ metricBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public Builder removeMetric(int index) {
+ if (metricBuilder_ == null) {
+ ensureMetricIsMutable();
+ metric_.remove(index);
+ onChanged();
+ } else {
+ metricBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Metric.Builder getMetricBuilder(
+ int index) {
+ return getMetricFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder(
+ int index) {
+ if (metricBuilder_ == null) {
+ return metric_.get(index); } else {
+ return metricBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public java.util.List<? extends io.prometheus.client.Metrics.MetricOrBuilder>
+ getMetricOrBuilderList() {
+ if (metricBuilder_ != null) {
+ return metricBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(metric_);
+ }
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Metric.Builder addMetricBuilder() {
+ return getMetricFieldBuilder().addBuilder(
+ io.prometheus.client.Metrics.Metric.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public io.prometheus.client.Metrics.Metric.Builder addMetricBuilder(
+ int index) {
+ return getMetricFieldBuilder().addBuilder(
+ index, io.prometheus.client.Metrics.Metric.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .io.prometheus.client.Metric metric = 4;</code>
+ */
+ public java.util.List<io.prometheus.client.Metrics.Metric.Builder>
+ getMetricBuilderList() {
+ return getMetricFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder>
+ getMetricFieldBuilder() {
+ if (metricBuilder_ == null) {
+ metricBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder>(
+ metric_,
+ ((bitField0_ & 0x00000008) == 0x00000008),
+ getParentForChildren(),
+ isClean());
+ metric_ = null;
+ }
+ return metricBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:io.prometheus.client.MetricFamily)
+ }
+
+ static {
+ defaultInstance = new MetricFamily(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
+ }
+
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_LabelPair_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_LabelPair_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Gauge_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Gauge_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Counter_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Counter_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Quantile_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Quantile_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Summary_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Summary_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Untyped_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Untyped_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Histogram_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Histogram_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Bucket_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Bucket_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_Metric_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_Metric_fieldAccessorTable;
+ private static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_io_prometheus_client_MetricFamily_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\rmetrics.proto\022\024io.prometheus.client\"(\n" +
+ "\tLabelPair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"" +
+ "\026\n\005Gauge\022\r\n\005value\030\001 \001(\001\"\030\n\007Counter\022\r\n\005va" +
+ "lue\030\001 \001(\001\"+\n\010Quantile\022\020\n\010quantile\030\001 \001(\001\022" +
+ "\r\n\005value\030\002 \001(\001\"e\n\007Summary\022\024\n\014sample_coun" +
+ "t\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\0220\n\010quantile\030" +
+ "\003 \003(\0132\036.io.prometheus.client.Quantile\"\030\n" +
+ "\007Untyped\022\r\n\005value\030\001 \001(\001\"c\n\tHistogram\022\024\n\014" +
+ "sample_count\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\022," +
+ "\n\006bucket\030\003 \003(\0132\034.io.prometheus.client.Bu",
+ "cket\"7\n\006Bucket\022\030\n\020cumulative_count\030\001 \001(\004" +
+ "\022\023\n\013upper_bound\030\002 \001(\001\"\276\002\n\006Metric\022.\n\005labe" +
+ "l\030\001 \003(\0132\037.io.prometheus.client.LabelPair" +
+ "\022*\n\005gauge\030\002 \001(\0132\033.io.prometheus.client.G" +
+ "auge\022.\n\007counter\030\003 \001(\0132\035.io.prometheus.cl" +
+ "ient.Counter\022.\n\007summary\030\004 \001(\0132\035.io.prome" +
+ "theus.client.Summary\022.\n\007untyped\030\005 \001(\0132\035." +
+ "io.prometheus.client.Untyped\0222\n\thistogra" +
+ "m\030\007 \001(\0132\037.io.prometheus.client.Histogram" +
+ "\022\024\n\014timestamp_ms\030\006 \001(\003\"\210\001\n\014MetricFamily\022",
+ "\014\n\004name\030\001 \001(\t\022\014\n\004help\030\002 \001(\t\022.\n\004type\030\003 \001(" +
+ "\0162 .io.prometheus.client.MetricType\022,\n\006m" +
+ "etric\030\004 \003(\0132\034.io.prometheus.client.Metri" +
+ "c*M\n\nMetricType\022\013\n\007COUNTER\020\000\022\t\n\005GAUGE\020\001\022" +
+ "\013\n\007SUMMARY\020\002\022\013\n\007UNTYPED\020\003\022\r\n\tHISTOGRAM\020\004" +
+ "B\026\n\024io.prometheus.client"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ internal_static_io_prometheus_client_LabelPair_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_io_prometheus_client_LabelPair_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_LabelPair_descriptor,
+ new java.lang.String[] { "Name", "Value", });
+ internal_static_io_prometheus_client_Gauge_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_io_prometheus_client_Gauge_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Gauge_descriptor,
+ new java.lang.String[] { "Value", });
+ internal_static_io_prometheus_client_Counter_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_io_prometheus_client_Counter_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Counter_descriptor,
+ new java.lang.String[] { "Value", });
+ internal_static_io_prometheus_client_Quantile_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_io_prometheus_client_Quantile_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Quantile_descriptor,
+ new java.lang.String[] { "Quantile", "Value", });
+ internal_static_io_prometheus_client_Summary_descriptor =
+ getDescriptor().getMessageTypes().get(4);
+ internal_static_io_prometheus_client_Summary_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Summary_descriptor,
+ new java.lang.String[] { "SampleCount", "SampleSum", "Quantile", });
+ internal_static_io_prometheus_client_Untyped_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_io_prometheus_client_Untyped_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Untyped_descriptor,
+ new java.lang.String[] { "Value", });
+ internal_static_io_prometheus_client_Histogram_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_io_prometheus_client_Histogram_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Histogram_descriptor,
+ new java.lang.String[] { "SampleCount", "SampleSum", "Bucket", });
+ internal_static_io_prometheus_client_Bucket_descriptor =
+ getDescriptor().getMessageTypes().get(7);
+ internal_static_io_prometheus_client_Bucket_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Bucket_descriptor,
+ new java.lang.String[] { "CumulativeCount", "UpperBound", });
+ internal_static_io_prometheus_client_Metric_descriptor =
+ getDescriptor().getMessageTypes().get(8);
+ internal_static_io_prometheus_client_Metric_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_Metric_descriptor,
+ new java.lang.String[] { "Label", "Gauge", "Counter", "Summary", "Untyped", "Histogram", "TimestampMs", });
+ internal_static_io_prometheus_client_MetricFamily_descriptor =
+ getDescriptor().getMessageTypes().get(9);
+ internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_io_prometheus_client_MetricFamily_descriptor,
+ new java.lang.String[] { "Name", "Help", "Type", "Metric", });
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml
new file mode 100644
index 000000000..69b2431c8
--- /dev/null
+++ b/vendor/github.com/prometheus/common/.travis.yml
@@ -0,0 +1,7 @@
+sudo: false
+
+language: go
+go:
+ - 1.5.4
+ - 1.6.2
+ - tip
diff --git a/vendor/github.com/prometheus/common/AUTHORS.md b/vendor/github.com/prometheus/common/AUTHORS.md
new file mode 100644
index 000000000..c63f4d395
--- /dev/null
+++ b/vendor/github.com/prometheus/common/AUTHORS.md
@@ -0,0 +1,11 @@
+Maintainers of this repository:
+
+* Fabian Reinartz <fabian@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Björn Rabenstein <beorn@soundcloud.com>
+* Fabian Reinartz <fabian@soundcloud.com>
+* Julius Volz <julius.volz@gmail.com>
+* Miguel Molina <hi@mvader.me>
diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md
new file mode 100644
index 000000000..5705f0fbe
--- /dev/null
+++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 000000000..636a2c1a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md
new file mode 100644
index 000000000..98f6ce24b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/README.md
@@ -0,0 +1,12 @@
+# Common
+[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common)
+
+This repository contains Go libraries that are shared across Prometheus
+components and libraries.
+
+* **config**: Common configuration structures
+* **expfmt**: Decoding and encoding for the exposition format
+* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus)
+* **model**: Shared data structures
+* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
+* **version**: Version informations and metric
diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go
new file mode 100644
index 000000000..33eb922ce
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/config.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "fmt"
+ "strings"
+)
+
+func checkOverflow(m map[string]interface{}, ctx string) error {
+ if len(m) > 0 {
+ var keys []string
+ for k := range m {
+ keys = append(keys, k)
+ }
+ return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", "))
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml
new file mode 100644
index 000000000..7dfdc1ead
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml
@@ -0,0 +1 @@
+cert_file: somefile
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml
new file mode 100644
index 000000000..d054383f1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml
@@ -0,0 +1 @@
+insecure_skip_verify: true
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml
new file mode 100644
index 000000000..12cbaac3b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml
@@ -0,0 +1 @@
+something_invalid: true
diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml
new file mode 100644
index 000000000..cec045e89
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml
@@ -0,0 +1 @@
+key_file: somefile
diff --git a/vendor/github.com/prometheus/common/config/tls_config.go b/vendor/github.com/prometheus/common/config/tls_config.go
new file mode 100644
index 000000000..7c7e7cb02
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/tls_config.go
@@ -0,0 +1,79 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+)
+
+// TLSConfig configures the options for TLS connections.
+type TLSConfig struct {
+ // The CA cert to use for the targets.
+ CAFile string `yaml:"ca_file,omitempty"`
+ // The client cert file for the targets.
+ CertFile string `yaml:"cert_file,omitempty"`
+ // The client key file for the targets.
+ KeyFile string `yaml:"key_file,omitempty"`
+ // Disable target certificate validation.
+ InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
+
+ // Catches all undefined fields and must be empty after parsing.
+ XXX map[string]interface{} `yaml:",inline"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ type plain TLSConfig
+ if err := unmarshal((*plain)(c)); err != nil {
+ return err
+ }
+ return checkOverflow(c.XXX, "TLS config")
+}
+
+// GenerateConfig produces a tls.Config based on TLS connection options.
+// It loads certificate files from disk if they are defined.
+func (c *TLSConfig) GenerateConfig() (*tls.Config, error) {
+ tlsConfig := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify}
+
+ // If a CA cert is provided then let's read it in so we can validate the
+ // scrape target's certificate properly.
+ if len(c.CAFile) > 0 {
+ caCertPool := x509.NewCertPool()
+ // Load CA cert.
+ caCert, err := ioutil.ReadFile(c.CAFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to use specified CA cert %s: %s", c.CAFile, err)
+ }
+ caCertPool.AppendCertsFromPEM(caCert)
+ tlsConfig.RootCAs = caCertPool
+ }
+
+ if len(c.CertFile) > 0 && len(c.KeyFile) == 0 {
+ return nil, fmt.Errorf("client cert file %q specified without client key file", c.CertFile)
+ } else if len(c.KeyFile) > 0 && len(c.CertFile) == 0 {
+ return nil, fmt.Errorf("client key file %q specified without client cert file", c.KeyFile)
+ } else if len(c.CertFile) > 0 && len(c.KeyFile) > 0 {
+ cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+ tlsConfig.BuildNameToCertificate()
+
+ return tlsConfig, nil
+}
diff --git a/vendor/github.com/prometheus/common/config/tls_config_test.go b/vendor/github.com/prometheus/common/config/tls_config_test.go
new file mode 100644
index 000000000..444303532
--- /dev/null
+++ b/vendor/github.com/prometheus/common/config/tls_config_test.go
@@ -0,0 +1,92 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "crypto/tls"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "testing"
+
+ "gopkg.in/yaml.v2"
+)
+
+// LoadTLSConfig parses the given YAML file into a tls.Config.
+func LoadTLSConfig(filename string) (*tls.Config, error) {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ cfg := &TLSConfig{}
+ if err = yaml.Unmarshal(content, cfg); err != nil {
+ return nil, err
+ }
+ return cfg.GenerateConfig()
+}
+
+var expectedTLSConfigs = []struct {
+ filename string
+ config *tls.Config
+}{
+ {
+ filename: "tls_config.empty.good.yml",
+ config: &tls.Config{},
+ }, {
+ filename: "tls_config.insecure.good.yml",
+ config: &tls.Config{InsecureSkipVerify: true},
+ },
+}
+
+func TestValidTLSConfig(t *testing.T) {
+ for _, cfg := range expectedTLSConfigs {
+ cfg.config.BuildNameToCertificate()
+ got, err := LoadTLSConfig("testdata/" + cfg.filename)
+ if err != nil {
+ t.Errorf("Error parsing %s: %s", cfg.filename, err)
+ }
+ if !reflect.DeepEqual(*got, *cfg.config) {
+ t.Fatalf("%s: unexpected config result: \n\n%s\n expected\n\n%s", cfg.filename, got, cfg.config)
+ }
+ }
+}
+
+var expectedTLSConfigErrors = []struct {
+ filename string
+ errMsg string
+}{
+ {
+ filename: "tls_config.invalid_field.bad.yml",
+ errMsg: "unknown fields in",
+ }, {
+ filename: "tls_config.cert_no_key.bad.yml",
+ errMsg: "specified without client key file",
+ }, {
+ filename: "tls_config.key_no_cert.bad.yml",
+ errMsg: "specified without client cert file",
+ },
+}
+
+func TestBadTLSConfigs(t *testing.T) {
+ for _, ee := range expectedTLSConfigErrors {
+ _, err := LoadTLSConfig("testdata/" + ee.filename)
+ if err == nil {
+ t.Errorf("Expected error parsing %s but got none", ee.filename)
+ continue
+ }
+ if !strings.Contains(err.Error(), ee.errMsg) {
+ t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/bench_test.go b/vendor/github.com/prometheus/common/expfmt/bench_test.go
new file mode 100644
index 000000000..e539bfc13
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/bench_test.go
@@ -0,0 +1,167 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var parser TextParser
+
+// Benchmarks to show how much penalty text format parsing actually inflicts.
+//
+// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
+//
+// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
+// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
+// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
+// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
+// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
+//
+// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
+// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),
+// the difference becomes less relevant, only ~4x.
+//
+// The test data contains 248 samples.
+
+// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
+// family DTOs.
+func BenchmarkParseText(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/text")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
+// into metric family DTOs.
+func BenchmarkParseTextGzip(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/text.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ in, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ if _, err := parser.TextToMetricFamilies(in); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
+// metric family DTOs. Note that this does not build a map of metric families
+// (as the text version does), because it is not required for Prometheus
+// ingestion either. (However, it is required for the text-format parsing, as
+// the metric family might be sprinkled all over the text, while the
+// protobuf-format guarantees bundling at one place.)
+func BenchmarkParseProto(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ family := &dto.MetricFamily{}
+ in := bytes.NewReader(data)
+ for {
+ family.Reset()
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
+// protobuf format.
+func BenchmarkParseProtoGzip(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ family := &dto.MetricFamily{}
+ in, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ for {
+ family.Reset()
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
+// metric family DTOs into a map. This is not happening during Prometheus
+// ingestion. It is just here to measure the overhead of that map creation and
+// separate it from the overhead of the text format parsing.
+func BenchmarkParseProtoMap(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ families := map[string]*dto.MetricFamily{}
+ in := bytes.NewReader(data)
+ for {
+ family := &dto.MetricFamily{}
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ families[family.GetName()] = family
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 000000000..487fdc6cc
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,412 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from a HTTP response header.
+// If no matching format can be found FormatUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
+
+// Extract samples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go
new file mode 100644
index 000000000..c27325a9d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go
@@ -0,0 +1,367 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "io"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/common/model"
+)
+
+func TestTextDecoder(t *testing.T) {
+ var (
+ ts = model.Now()
+ in = `
+# Only a quite simple scenario with two metric families.
+# More complicated tests of the parser itself can be found in the text package.
+# TYPE mf2 counter
+mf2 3
+mf1{label="value1"} -3.14 123456
+mf1{label="value2"} 42
+mf2 4
+`
+ out = model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf1",
+ "label": "value1",
+ },
+ Value: -3.14,
+ Timestamp: 123456,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf1",
+ "label": "value2",
+ },
+ Value: 42,
+ Timestamp: ts,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf2",
+ },
+ Value: 3,
+ Timestamp: ts,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf2",
+ },
+ Value: 4,
+ Timestamp: ts,
+ },
+ }
+ )
+
+ dec := &SampleDecoder{
+ Dec: &textDecoder{r: strings.NewReader(in)},
+ Opts: &DecodeOptions{
+ Timestamp: ts,
+ },
+ }
+ var all model.Vector
+ for {
+ var smpls model.Vector
+ err := dec.Decode(&smpls)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ all = append(all, smpls...)
+ }
+ sort.Sort(all)
+ sort.Sort(out)
+ if !reflect.DeepEqual(all, out) {
+ t.Fatalf("output does not match")
+ }
+}
+
+func TestProtoDecoder(t *testing.T) {
+
+ var testTime = model.Now()
+
+ scenarios := []struct {
+ in string
+ expected model.Vector
+ fail bool
+ }{
+ {
+ in: "",
+ },
+ {
+ in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
+ fail: true,
+ },
+ {
+ in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ },
+ Value: -42,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "another_label_name": "another_label_value",
+ },
+ Value: 84,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_count",
+ "some_label_name": "some_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_sum",
+ "some_label_name": "some_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ "quantile": "0.99",
+ },
+ Value: -42,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ "quantile": "0.999",
+ },
+ Value: -84,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_count",
+ "another_label_name": "another_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_sum",
+ "another_label_name": "another_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "another_label_name": "another_label_value",
+ "quantile": "0.5",
+ },
+ Value: 10,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "100",
+ },
+ Value: 123,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "120",
+ },
+ Value: 412,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "144",
+ },
+ Value: 592,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "172.8",
+ },
+ Value: 1524,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "+Inf",
+ },
+ Value: 2693,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_sum",
+ },
+ Value: 1756047.3,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_count",
+ },
+ Value: 2693,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ // The metric type is unset in this protobuf, which needs to be handled
+ // correctly by the decoder.
+ in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ },
+ Value: 1,
+ Timestamp: testTime,
+ },
+ },
+ },
+ }
+
+ for i, scenario := range scenarios {
+ dec := &SampleDecoder{
+ Dec: &protoDecoder{r: strings.NewReader(scenario.in)},
+ Opts: &DecodeOptions{
+ Timestamp: testTime,
+ },
+ }
+
+ var all model.Vector
+ for {
+ var smpls model.Vector
+ err := dec.Decode(&smpls)
+ if err == io.EOF {
+ break
+ }
+ if scenario.fail {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ all = append(all, smpls...)
+ }
+ sort.Sort(all)
+ sort.Sort(scenario.expected)
+ if !reflect.DeepEqual(all, scenario.expected) {
+ t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all)
+ }
+ }
+}
+
+func testDiscriminatorHTTPHeader(t testing.TB) {
+ var scenarios = []struct {
+ input map[string]string
+ output Format
+ err error
+ }{
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`},
+ output: FmtProtoDelim,
+ },
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`},
+ output: FmtUnknown,
+ },
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`},
+ output: FmtUnknown,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain; version=0.0.4`},
+ output: FmtText,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain`},
+ output: FmtText,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain; version=0.0.3`},
+ output: FmtUnknown,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ var header http.Header
+
+ if len(scenario.input) > 0 {
+ header = http.Header{}
+ }
+
+ for key, value := range scenario.input {
+ header.Add(key, value)
+ }
+
+ actual := ResponseFormat(header)
+
+ if scenario.output != actual {
+ t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
+ }
+ }
+}
+
+func TestDiscriminatorHTTPHeader(t *testing.T) {
+ testDiscriminatorHTTPHeader(t)
+}
+
+func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testDiscriminatorHTTPHeader(b)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 000000000..11839ed65
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 000000000..fae10f6eb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 000000000..dc2eedeef
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
new file mode 100644
index 000000000..139597f9c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
@@ -0,0 +1,2 @@
+
+
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
new file mode 100644
index 000000000..2ae870679
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
@@ -0,0 +1,6 @@
+
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
new file mode 100644
index 000000000..5c351db36
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
@@ -0,0 +1,12 @@
+
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
new file mode 100644
index 000000000..0b3c345aa
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
@@ -0,0 +1,22 @@
+
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
new file mode 100644
index 000000000..bde0a387a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
@@ -0,0 +1,10 @@
+
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
new file mode 100644
index 000000000..4c67f9a19
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
@@ -0,0 +1 @@
+bla 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
new file mode 100644
index 000000000..b853478ee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
@@ -0,0 +1 @@
+metric{label="\t"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
new file mode 100644
index 000000000..b5fe5f5a6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2 3
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
new file mode 100644
index 000000000..57c7fbc0b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
@@ -0,0 +1 @@
+metric{label="bla"} blubb
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
new file mode 100644
index 000000000..0a9df79a1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
@@ -0,0 +1,3 @@
+
+# HELP metric one
+# HELP metric two
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
new file mode 100644
index 000000000..5bc742781
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
@@ -0,0 +1,3 @@
+
+# TYPE metric counter
+# TYPE metric untyped
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
new file mode 100644
index 000000000..a9a24265b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
@@ -0,0 +1,3 @@
+
+metric 4.12
+# TYPE metric counter
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
new file mode 100644
index 000000000..7e95ca8f4
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
@@ -0,0 +1,2 @@
+
+# TYPE metric bla
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
new file mode 100644
index 000000000..7825f8887
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
@@ -0,0 +1,2 @@
+
+# TYPE met-ric
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
new file mode 100644
index 000000000..8f35cae0c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
@@ -0,0 +1 @@
+@invalidmetric{label="bla"} 3.14 2 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
new file mode 100644
index 000000000..7ca2cc268
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
@@ -0,0 +1 @@
+{label="bla"} 3.14 2 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
new file mode 100644
index 000000000..7a6ccc0dd
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
@@ -0,0 +1,3 @@
+
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
new file mode 100644
index 000000000..726d0017c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
@@ -0,0 +1,3 @@
+
+metric{label="new
+line"} 3.14
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
new file mode 100644
index 000000000..6aa9e3081
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
@@ -0,0 +1 @@
+metric{@="bla"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
new file mode 100644
index 000000000..d112cb902
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
@@ -0,0 +1 @@
+metric{__name__="bla"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
new file mode 100644
index 000000000..b34554a8d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
@@ -0,0 +1 @@
+metric{label+="bla"} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
new file mode 100644
index 000000000..c4d7df3d1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
@@ -0,0 +1 @@
+metric{label=bla} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
new file mode 100644
index 000000000..97eafc4a6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
@@ -0,0 +1,3 @@
+
+# TYPE metric summary
+metric{quantile="bla"} 3.14
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
new file mode 100644
index 000000000..fc706496b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
@@ -0,0 +1 @@
+metric{label="bla"+} 3.14 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
new file mode 100644
index 000000000..57b4879c0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2.72
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
new file mode 100644
index 000000000..be1e6a369
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
@@ -0,0 +1 @@
+m{} 0
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/json2 b/vendor/github.com/prometheus/common/expfmt/testdata/json2
new file mode 100644
index 000000000..b914c9386
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/json2
@@ -0,0 +1,46 @@
+[
+ {
+ "baseLabels": {
+ "__name__": "rpc_calls_total",
+ "job": "batch_job"
+ },
+ "docstring": "RPC calls.",
+ "metric": {
+ "type": "counter",
+ "value": [
+ {
+ "labels": {
+ "service": "zed"
+ },
+ "value": 25
+ },
+ {
+ "labels": {
+ "service": "bar"
+ },
+ "value": 24
+ }
+ ]
+ }
+ },
+ {
+ "baseLabels": {
+ "__name__": "rpc_latency_microseconds"
+ },
+ "docstring": "RPC latency.",
+ "metric": {
+ "type": "histogram",
+ "value": [
+ {
+ "labels": {
+ "service": "foo"
+ },
+ "value": {
+ "0.010000": 15,
+ "0.990000": 17
+ }
+ }
+ ]
+ }
+ }
+]
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad b/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad
new file mode 100644
index 000000000..cc6ac97c5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad
@@ -0,0 +1,46 @@
+[
+ {
+ "baseLabels": {
+ "__name__": "rpc_calls_total",
+ "job": "batch_job"
+ },
+ "docstring": "RPC calls.",
+ "metric": {
+ "type": "counter",
+ "value": [
+ {
+ "labels": {
+ "servic|e": "zed"
+ },
+ "value": 25
+ },
+ {
+ "labels": {
+ "service": "bar"
+ },
+ "value": 24
+ }
+ ]
+ }
+ },
+ {
+ "baseLabels": {
+ "__name__": "rpc_latency_microseconds"
+ },
+ "docstring": "RPC latency.",
+ "metric": {
+ "type": "histogram",
+ "value": [
+ {
+ "labels": {
+ "service": "foo"
+ },
+ "value": {
+ "0.010000": 15,
+ "0.990000": 17
+ }
+ }
+ ]
+ }
+ }
+]
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/protobuf b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf
new file mode 100644
index 000000000..b2d018a7c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf
Binary files differ
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz
new file mode 100644
index 000000000..7622adb1c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz
Binary files differ
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/text b/vendor/github.com/prometheus/common/expfmt/testdata/text
new file mode 100644
index 000000000..f3d8c3784
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/text
@@ -0,0 +1,322 @@
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/"} 0
+http_request_duration_microseconds_count{handler="/"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/alerts"} 0
+http_request_duration_microseconds_count{handler="/alerts"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/metrics"} 0
+http_request_duration_microseconds_count{handler="/api/metrics"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/query"} 0
+http_request_duration_microseconds_count{handler="/api/query"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/query_range"} 0
+http_request_duration_microseconds_count{handler="/api/query_range"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/targets"} 0
+http_request_duration_microseconds_count{handler="/api/targets"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/consoles/"} 0
+http_request_duration_microseconds_count{handler="/consoles/"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/graph"} 0
+http_request_duration_microseconds_count{handler="/graph"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/heap"} 0
+http_request_duration_microseconds_count{handler="/heap"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/static/"} 0
+http_request_duration_microseconds_count{handler="/static/"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
+http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
+http_request_duration_microseconds_count{handler="prometheus"} 119
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="/",quantile="0.5"} 0
+http_request_size_bytes{handler="/",quantile="0.9"} 0
+http_request_size_bytes{handler="/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/"} 0
+http_request_size_bytes_count{handler="/"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/alerts"} 0
+http_request_size_bytes_count{handler="/alerts"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/metrics"} 0
+http_request_size_bytes_count{handler="/api/metrics"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/query"} 0
+http_request_size_bytes_count{handler="/api/query"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/query_range"} 0
+http_request_size_bytes_count{handler="/api/query_range"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/targets"} 0
+http_request_size_bytes_count{handler="/api/targets"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/consoles/"} 0
+http_request_size_bytes_count{handler="/consoles/"} 0
+http_request_size_bytes{handler="/graph",quantile="0.5"} 0
+http_request_size_bytes{handler="/graph",quantile="0.9"} 0
+http_request_size_bytes{handler="/graph",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/graph"} 0
+http_request_size_bytes_count{handler="/graph"} 0
+http_request_size_bytes{handler="/heap",quantile="0.5"} 0
+http_request_size_bytes{handler="/heap",quantile="0.9"} 0
+http_request_size_bytes{handler="/heap",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/heap"} 0
+http_request_size_bytes_count{handler="/heap"} 0
+http_request_size_bytes{handler="/static/",quantile="0.5"} 0
+http_request_size_bytes{handler="/static/",quantile="0.9"} 0
+http_request_size_bytes{handler="/static/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/static/"} 0
+http_request_size_bytes_count{handler="/static/"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
+http_request_size_bytes_sum{handler="prometheus"} 34488
+http_request_size_bytes_count{handler="prometheus"} 119
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="prometheus",method="get"} 119
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="/",quantile="0.5"} 0
+http_response_size_bytes{handler="/",quantile="0.9"} 0
+http_response_size_bytes{handler="/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/"} 0
+http_response_size_bytes_count{handler="/"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/alerts"} 0
+http_response_size_bytes_count{handler="/alerts"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/metrics"} 0
+http_response_size_bytes_count{handler="/api/metrics"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/query"} 0
+http_response_size_bytes_count{handler="/api/query"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/query_range"} 0
+http_response_size_bytes_count{handler="/api/query_range"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/targets"} 0
+http_response_size_bytes_count{handler="/api/targets"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/consoles/"} 0
+http_response_size_bytes_count{handler="/consoles/"} 0
+http_response_size_bytes{handler="/graph",quantile="0.5"} 0
+http_response_size_bytes{handler="/graph",quantile="0.9"} 0
+http_response_size_bytes{handler="/graph",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/graph"} 0
+http_response_size_bytes_count{handler="/graph"} 0
+http_response_size_bytes{handler="/heap",quantile="0.5"} 0
+http_response_size_bytes{handler="/heap",quantile="0.9"} 0
+http_response_size_bytes{handler="/heap",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/heap"} 0
+http_response_size_bytes_count{handler="/heap"} 0
+http_response_size_bytes{handler="/static/",quantile="0.5"} 0
+http_response_size_bytes{handler="/static/",quantile="0.9"} 0
+http_response_size_bytes{handler="/static/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/static/"} 0
+http_response_size_bytes_count{handler="/static/"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
+http_response_size_bytes_sum{handler="prometheus"} 247001
+http_response_size_bytes_count{handler="prometheus"} 119
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 0.55
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 70
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 8192
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 29
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 5.3870592e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.42236894836e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 5.41478912e+08
+# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
+# TYPE prometheus_dns_sd_lookup_failures_total counter
+prometheus_dns_sd_lookup_failures_total 0
+# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
+# TYPE prometheus_dns_sd_lookups_total counter
+prometheus_dns_sd_lookups_total 7
+# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
+# TYPE prometheus_evaluator_duration_milliseconds summary
+prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
+prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
+prometheus_evaluator_duration_milliseconds_sum 12
+prometheus_evaluator_duration_milliseconds_count 23
+# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
+# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
+prometheus_local_storage_checkpoint_duration_milliseconds 0
+# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
+# TYPE prometheus_local_storage_chunk_ops_total counter
+prometheus_local_storage_chunk_ops_total{type="create"} 598
+prometheus_local_storage_chunk_ops_total{type="persist"} 174
+prometheus_local_storage_chunk_ops_total{type="pin"} 920
+prometheus_local_storage_chunk_ops_total{type="transcode"} 415
+prometheus_local_storage_chunk_ops_total{type="unpin"} 920
+# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
+# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
+prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
+# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
+# TYPE prometheus_local_storage_indexing_batch_sizes summary
+prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
+prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
+prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
+prometheus_local_storage_indexing_batch_sizes_sum 2
+prometheus_local_storage_indexing_batch_sizes_count 1
+# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
+# TYPE prometheus_local_storage_indexing_queue_capacity gauge
+prometheus_local_storage_indexing_queue_capacity 16384
+# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
+# TYPE prometheus_local_storage_indexing_queue_length gauge
+prometheus_local_storage_indexing_queue_length 0
+# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
+# TYPE prometheus_local_storage_ingested_samples_total counter
+prometheus_local_storage_ingested_samples_total 30473
+# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
+# TYPE prometheus_local_storage_invalid_preload_requests_total counter
+prometheus_local_storage_invalid_preload_requests_total 0
+# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
+# TYPE prometheus_local_storage_memory_chunkdescs gauge
+prometheus_local_storage_memory_chunkdescs 1059
+# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
+# TYPE prometheus_local_storage_memory_chunks gauge
+prometheus_local_storage_memory_chunks 1020
+# HELP prometheus_local_storage_memory_series The current number of series in memory.
+# TYPE prometheus_local_storage_memory_series gauge
+prometheus_local_storage_memory_series 424
+# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
+# TYPE prometheus_local_storage_persist_latency_microseconds summary
+prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
+prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
+prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
+prometheus_local_storage_persist_latency_microseconds_sum 20424.415
+prometheus_local_storage_persist_latency_microseconds_count 174
+# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
+# TYPE prometheus_local_storage_persist_queue_capacity gauge
+prometheus_local_storage_persist_queue_capacity 1024
+# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
+# TYPE prometheus_local_storage_persist_queue_length gauge
+prometheus_local_storage_persist_queue_length 0
+# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
+# TYPE prometheus_local_storage_series_ops_total counter
+prometheus_local_storage_series_ops_total{type="create"} 2
+prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
+# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
+# TYPE prometheus_notifications_latency_milliseconds summary
+prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
+prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
+prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
+prometheus_notifications_latency_milliseconds_sum 0
+prometheus_notifications_latency_milliseconds_count 0
+# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
+# TYPE prometheus_notifications_queue_capacity gauge
+prometheus_notifications_queue_capacity 100
+# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
+# TYPE prometheus_notifications_queue_length gauge
+prometheus_notifications_queue_length 0
+# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
+# TYPE prometheus_rule_evaluation_duration_milliseconds summary
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
+prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
+prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
+prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
+prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
+# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
+# TYPE prometheus_rule_evaluation_failures_total counter
+prometheus_rule_evaluation_failures_total 0
+# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
+# TYPE prometheus_samples_queue_capacity gauge
+prometheus_samples_queue_capacity 4096
+# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
+# TYPE prometheus_samples_queue_length gauge
+prometheus_samples_queue_length 0
+# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
+# TYPE prometheus_target_interval_length_seconds summary
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
+prometheus_target_interval_length_seconds_sum{interval="15s"} 175
+prometheus_target_interval_length_seconds_count{interval="15s"} 12
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
+prometheus_target_interval_length_seconds_sum{interval="1s"} 55
+prometheus_target_interval_length_seconds_count{interval="1s"} 117
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/text.gz b/vendor/github.com/prometheus/common/expfmt/testdata/text.gz
new file mode 100644
index 000000000..b7658c84d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/testdata/text.gz
Binary files differ
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..f11321cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+var (
+ escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ if includeDoubleQuote {
+ return escapeWithDoubleQuote.Replace(v)
+ }
+
+ return escape.Replace(v)
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create_test.go b/vendor/github.com/prometheus/common/expfmt/text_create_test.go
new file mode 100644
index 000000000..e4cc5d803
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create_test.go
@@ -0,0 +1,443 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// testCreate runs a table of MetricFamily protobufs through MetricFamilyToText
+// and verifies both the exact text output and the reported byte count. It
+// takes testing.TB so the same table backs both TestCreate and BenchmarkCreate.
+func testCreate(t testing.TB) {
+ var scenarios = []struct {
+ in *dto.MetricFamily
+ out string
+ }{
+ // 0: Counter, NaN as value, timestamp given.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("two-line\n doc str\\ing"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(.23),
+ },
+ TimestampMs: proto.Int64(1234567890),
+ },
+ },
+ },
+ out: `# HELP name two-line\n doc str\\ing
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name{labelname="val2",basename="basevalue"} 0.23 1234567890
+`,
+ },
+ // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("gauge_name"),
+ Help: proto.String("gauge\ndoc\nstr\"ing"),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("val with\nnew line"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("val with \\backslash and \"quotes\""),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(+1)),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("Björn"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("佖佥"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(3.14E42),
+ },
+ },
+ },
+ },
+ out: `# HELP gauge_name gauge\ndoc\nstr"ing
+# TYPE gauge_name gauge
+gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
+gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
+`,
+ },
+ // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("untyped_name"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("value 1"),
+ },
+ },
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-1.23e-45),
+ },
+ },
+ },
+ },
+ out: `# TYPE untyped_name untyped
+untyped_name -Inf
+untyped_name{name_1="value 1"} -1.23e-45
+`,
+ },
+ // 3: Summary.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("summary_name"),
+ Help: proto.String("summary docstring"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(42),
+ SampleSum: proto.Float64(-3.4567),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(-1.23),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(.2342354),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.99),
+ Value: proto.Float64(0),
+ },
+ },
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("value 1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("value 2"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(4711),
+ SampleSum: proto.Float64(2010.1971),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(1),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(2),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.99),
+ Value: proto.Float64(3),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP summary_name summary docstring
+# TYPE summary_name summary
+summary_name{quantile="0.5"} -1.23
+summary_name{quantile="0.9"} 0.2342354
+summary_name{quantile="0.99"} 0
+summary_name_sum -3.4567
+summary_name_count 42
+summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
+summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
+summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
+summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
+summary_name_count{name_1="value 1",name_2="value 2"} 4711
+`,
+ },
+ // 4: Histogram
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(math.Inf(+1)),
+ CumulativeCount: proto.Uint64(2693),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ },
+ // 5: Histogram with missing +Inf bucket. Expected output is identical to
+ // scenario 4: the writer synthesizes the +Inf bucket from SampleCount.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ },
+ // 6: No metric type, should result in default type Counter.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ out: `# HELP name doc string
+# TYPE name counter
+name -Inf
+`,
+ },
+ }
+
+ // Render each family and check both the byte count and the exact output.
+ for i, scenario := range scenarios {
+ out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
+ n, err := MetricFamilyToText(out, scenario.in)
+ if err != nil {
+ t.Errorf("%d. error: %s", i, err)
+ continue
+ }
+ if expected, got := len(scenario.out), n; expected != got {
+ t.Errorf(
+ "%d. expected %d bytes written, got %d",
+ i, expected, got,
+ )
+ }
+ if expected, got := scenario.out, out.String(); expected != got {
+ t.Errorf(
+ "%d. expected out=%q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+// TestCreate runs the shared success-scenario table once.
+func TestCreate(t *testing.T) {
+ testCreate(t)
+}
+
+// BenchmarkCreate measures rendering the full success-scenario table.
+func BenchmarkCreate(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testCreate(b)
+ }
+}
+
+// testCreateError feeds invalid MetricFamily protobufs to MetricFamilyToText
+// and checks that an error with the expected message prefix is returned. It
+// takes testing.TB so the same table backs both the test and the benchmark.
+func testCreateError(t testing.TB) {
+ var scenarios = []struct {
+ in *dto.MetricFamily
+ err string
+ }{
+ // 0: No metric.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{},
+ },
+ err: "MetricFamily has no metrics",
+ },
+ // 1: No metric name.
+ {
+ in: &dto.MetricFamily{
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ err: "MetricFamily has no name",
+ },
+ // 2: Wrong type.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ err: "expected counter in metric",
+ },
+ }
+
+ for i, scenario := range scenarios {
+ var out bytes.Buffer
+ _, err := MetricFamilyToText(&out, scenario.in)
+ if err == nil {
+ t.Errorf("%d. expected error, got nil", i)
+ continue
+ }
+ // Errors are matched by prefix, not full equality.
+ if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
+ t.Errorf(
+ "%d. expected error starting with %q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+// TestCreateError runs the shared error-scenario table once.
+func TestCreateError(t *testing.T) {
+ testCreateError(t)
+}
+
+// BenchmarkCreateError measures the error paths of MetricFamilyToText.
+func BenchmarkCreateError(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testCreateError(b)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 000000000..ef9a15077
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil. (TextToMetricFamilies drives the machine by calling each returned
+// stateFn until it gets nil.)
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int // 1-based line number where the error occurred.
+ Msg string // Human-readable description of the problem.
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use. A TextParser must not be used concurrently; see
+// TextToMetricFamilies.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+ // '_sum' respectively and belong to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered. Note that the map is returned even when an error is
+// reported, so it may contain partial results.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ // Drive the state machine until a state function returns nil.
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+// reset prepares the parser for a fresh run over 'in', reusing buffers and
+// maps from a previous run where possible to avoid reallocation.
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ // Reuse the maps if they exist and are already empty; allocate fresh
+ // ones only when missing or still holding entries from a previous run.
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ // NaN marks "no quantile/bucket label seen yet" for the current sample.
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ // Anything else must be the start of a sample line.
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it). It distinguishes the
+// magic 'HELP' and 'TYPE' keywords from generic comments, which are skipped.
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ // p.currentToken now holds the first word of the comment.
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ // Unreachable: keyword was checked above to be HELP or TYPE.
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet. A family
+ // without a preceding TYPE line defaults to untyped.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ // No '{' means the sample has no label set at all.
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // An immediate '}' ends the (possibly empty) label set.
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ // '__name__' is reserved for the metric name itself.
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it). For
+// summaries and histograms it additionally parses the special 'quantile'/'le'
+// labels as floats and records ordinary labels for signature calculation.
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ // BUG FIX: use GetValue() rather than the *string field Value, so the
+ // error message shows the label value itself instead of a pointer
+ // address (%q on a *string renders as %!q(*string=0x...)).
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ // Counts arrive as floats in the text format and are
+ // truncated to uint64 here.
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it). The timestamp
+// is an integer, stored into the metric's TimestampMs field.
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ // Nothing may follow the timestamp on the same line.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring. 'true' enables '\\' and '\n' escape
+ // sequence handling.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type. Matching against the proto enum names is
+ // case-insensitive.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message. Setting p.err causes the state functions to return nil, which
+// terminates the state machine.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte. Any read
+// error is recorded in p.err.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t' (i.e. it never reads a
+// byte in that case).
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken. On a read error, p.err is set and the partial token
+// remains in p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ // Previous byte was '\'; resolve the escape sequence.
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken. If the first byte is not a valid metric-name
+// start character, the token is left empty.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is a _sum or _count of an already-seen summary or histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
new file mode 100644
index 000000000..7e7388ce9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go
@@ -0,0 +1,588 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func testTextParse(t testing.TB) {
+ var scenarios = []struct {
+ in string
+ out []*dto.MetricFamily
+ }{
+ // 0: Empty lines as input.
+ {
+ in: `
+
+`,
+ out: []*dto.MetricFamily{},
+ },
+ // 1: Minimal case.
+ {
+ in: `
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("minimal_metric"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(1.234),
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("another_metric"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-3e3),
+ },
+ TimestampMs: proto.Int64(103948),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("no_labels"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(3),
+ },
+ },
+ },
+ },
+ },
+ },
+ // 2: Counters & gauges, docstrings, various whitespace, escape sequences.
+ {
+ in: `
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("two-line\n doc str\\ing"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("base\"v\\al\nue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(.23),
+ },
+ TimestampMs: proto.Int64(1234567890),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("name2"),
+ Help: proto.String("doc str\"ing 2"),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue2"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(+1)),
+ },
+ TimestampMs: proto.Int64(54321),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ },
+ },
+ // 3: The evil summary, mixed with other types and funny comments.
+ {
+ in: `
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("fake_sum"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(2001),
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("decoy"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-1),
+ },
+ TimestampMs: proto.Int64(-2),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("my_summary"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(42),
+ SampleSum: proto.Float64(4711),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(110),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(140),
+ },
+ },
+ },
+ TimestampMs: proto.Int64(2),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n2"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(5),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(-12.34),
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ },
+ TimestampMs: proto.Int64(5),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val2"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleSum: proto.Float64(8),
+ },
+ TimestampMs: proto.Int64(15),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val3"),
+ },
+ },
+ Summary: &dto.Summary{
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.2),
+ Value: proto.Float64(4711),
+ },
+ },
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("another_summary"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n2"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(20),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.3),
+ Value: proto.Float64(-1.2),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ // 4: The histogram.
+ {
+ in: `
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ out: []*dto.MetricFamily{
+ {
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(math.Inf(+1)),
+ CumulativeCount: proto.Uint64(2693),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, scenario := range scenarios {
+ out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
+ if err != nil {
+ t.Errorf("%d. error: %s", i, err)
+ continue
+ }
+ if expected, got := len(scenario.out), len(out); expected != got {
+ t.Errorf(
+ "%d. expected %d MetricFamilies, got %d",
+ i, expected, got,
+ )
+ }
+ for _, expected := range scenario.out {
+ got, ok := out[expected.GetName()]
+ if !ok {
+ t.Errorf(
+ "%d. expected MetricFamily %q, found none",
+ i, expected.GetName(),
+ )
+ continue
+ }
+ if expected.String() != got.String() {
+ t.Errorf(
+ "%d. expected MetricFamily %s, got %s",
+ i, expected, got,
+ )
+ }
+ }
+ }
+}
+
+func TestTextParse(t *testing.T) {
+ testTextParse(t)
+}
+
+func BenchmarkTextParse(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testTextParse(b)
+ }
+}
+
+func testTextParseError(t testing.TB) {
+ var scenarios = []struct {
+ in string
+ err string
+ }{
+ // 0: No new-line at end of input.
+ {
+ in: `
+bla 3.14
+blubber 42`,
+ err: "text format parsing error in line 3: unexpected end of input stream",
+ },
+ // 1: Invalid escape sequence in label value.
+ {
+ in: `metric{label="\t"} 3.14`,
+ err: "text format parsing error in line 1: invalid escape sequence",
+ },
+ // 2: Newline in label value.
+ {
+ in: `
+metric{label="new
+line"} 3.14
+`,
+ err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
+ },
+ // 3:
+ {
+ in: `metric{@="bla"} 3.14`,
+ err: "text format parsing error in line 1: invalid label name for metric",
+ },
+ // 4:
+ {
+ in: `metric{__name__="bla"} 3.14`,
+ err: `text format parsing error in line 1: label name "__name__" is reserved`,
+ },
+ // 5:
+ {
+ in: `metric{label+="bla"} 3.14`,
+ err: "text format parsing error in line 1: expected '=' after label name",
+ },
+ // 6:
+ {
+ in: `metric{label=bla} 3.14`,
+ err: "text format parsing error in line 1: expected '\"' at start of label value",
+ },
+ // 7:
+ {
+ in: `
+# TYPE metric summary
+metric{quantile="bla"} 3.14
+`,
+ err: "text format parsing error in line 3: expected float as value for 'quantile' label",
+ },
+ // 8:
+ {
+ in: `metric{label="bla"+} 3.14`,
+ err: "text format parsing error in line 1: unexpected end of label value",
+ },
+ // 9:
+ {
+ in: `metric{label="bla"} 3.14 2.72
+`,
+ err: "text format parsing error in line 1: expected integer as timestamp",
+ },
+ // 10:
+ {
+ in: `metric{label="bla"} 3.14 2 3
+`,
+ err: "text format parsing error in line 1: spurious string after timestamp",
+ },
+ // 11:
+ {
+ in: `metric{label="bla"} blubb
+`,
+ err: "text format parsing error in line 1: expected float as value",
+ },
+ // 12:
+ {
+ in: `
+# HELP metric one
+# HELP metric two
+`,
+ err: "text format parsing error in line 3: second HELP line for metric name",
+ },
+ // 13:
+ {
+ in: `
+# TYPE metric counter
+# TYPE metric untyped
+`,
+ err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
+ },
+ // 14:
+ {
+ in: `
+metric 4.12
+# TYPE metric counter
+`,
+ err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
+ },
+ // 15:
+ {
+ in: `
+# TYPE metric bla
+`,
+ err: "text format parsing error in line 2: unknown metric type",
+ },
+ // 16:
+ {
+ in: `
+# TYPE met-ric
+`,
+ err: "text format parsing error in line 2: invalid metric name in comment",
+ },
+ // 17:
+ {
+ in: `@invalidmetric{label="bla"} 3.14 2`,
+ err: "text format parsing error in line 1: invalid metric name",
+ },
+ // 18:
+ {
+ in: `{label="bla"} 3.14 2`,
+ err: "text format parsing error in line 1: invalid metric name",
+ },
+ // 19:
+ {
+ in: `
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
+`,
+ err: "text format parsing error in line 3: expected float as value for 'le' label",
+ },
+ }
+
+ for i, scenario := range scenarios {
+ _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
+ if err == nil {
+ t.Errorf("%d. expected error, got nil", i)
+ continue
+ }
+ if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
+ t.Errorf(
+ "%d. expected error starting with %q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+func TestTextParseError(t *testing.T) {
+ testTextParseError(t)
+}
+
+func BenchmarkParseError(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testTextParseError(b)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 000000000..7723656d5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 000000000..648b38cb6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go
new file mode 100644
index 000000000..41d328f1d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go
@@ -0,0 +1,33 @@
+package goautoneg
+
+import (
+ "testing"
+)
+
+var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"
+
+func TestParseAccept(t *testing.T) {
+ alternatives := []string{"text/html", "image/png"}
+ content_type := Negotiate(chrome, alternatives)
+ if content_type != "image/png" {
+ t.Errorf("got %s expected image/png", content_type)
+ }
+
+ alternatives = []string{"text/html", "text/plain", "text/n3"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/html" {
+ t.Errorf("got %s expected text/html", content_type)
+ }
+
+ alternatives = []string{"text/n3", "text/plain"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/plain" {
+ t.Errorf("got %s expected text/plain", content_type)
+ }
+
+ alternatives = []string{"text/n3", "application/rdf+xml"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/n3" {
+ t.Errorf("got %s expected text/n3", content_type)
+ }
+}
diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
new file mode 100644
index 000000000..6d41284ce
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package log
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/windows/svc/eventlog"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ setEventlogFormatter = func(name string, debugAsInfo bool) error {
+ if name == "" {
+ return fmt.Errorf("missing name parameter")
+ }
+
+ fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
+ origLogger.Errorf("can't connect logger to eventlog: %v", err)
+ return err
+ }
+ origLogger.Formatter = fmter
+ return nil
+ }
+}
+
+type eventlogger struct {
+ log *eventlog.Log
+ debugAsInfo bool
+ wrap logrus.Formatter
+}
+
+func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
+ logHandle, err := eventlog.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
+}
+
+func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
+ data, err := s.wrap.Format(e)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
+ return data, err
+ }
+
+ switch e.Level {
+ case logrus.PanicLevel:
+ fallthrough
+ case logrus.FatalLevel:
+ fallthrough
+ case logrus.ErrorLevel:
+ err = s.log.Error(102, e.Message)
+ case logrus.WarnLevel:
+ err = s.log.Warning(101, e.Message)
+ case logrus.InfoLevel:
+ err = s.log.Info(100, e.Message)
+ case logrus.DebugLevel:
+ if s.debugAsInfo {
+ err = s.log.Info(100, e.Message)
+ }
+ default:
+ err = s.log.Info(100, e.Message)
+ }
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
+ }
+
+ return data, err
+}
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
new file mode 100644
index 000000000..efad4842f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/log.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+)
+
// levelFlag exposes the logger's severity level as a flag.Value so it can be
// configured from the command line via -log.level.
type levelFlag string

// String implements flag.Value. The level name is returned wrapped in quotes.
func (f levelFlag) String() string {
	level := string(f)
	return fmt.Sprintf("%q", level)
}
+
// Set implements flag.Value. It parses the given level name via logrus and
// applies it to the package-level logger, so the flag takes effect
// immediately when parsed.
func (f levelFlag) Set(level string) error {
	l, err := logrus.ParseLevel(level)
	if err != nil {
		return err
	}
	origLogger.Level = l
	return nil
}
+
// setSyslogFormatter is nil if the target architecture does not support syslog.
// It is assigned by an init function in syslog_formatter.go on platforms that do.
var setSyslogFormatter func(string, string) error

// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
// It is assigned by an init function in eventlog_formatter.go on Windows.
var setEventlogFormatter func(string, bool) error

// setJSONFormatter switches the package-level logger to JSON output.
func setJSONFormatter() {
	origLogger.Formatter = &logrus.JSONFormatter{}
}
+
// logFormatFlag carries the log target URL and implements flag.Value so the
// target can be configured via -log.format.
type logFormatFlag url.URL

// String implements flag.Value. It renders the configured target URL in
// quotes.
func (f logFormatFlag) String() string {
	target := url.URL(f)
	return fmt.Sprintf("%q", target.String())
}
+
// Set implements flag.Value. It parses a target URL of the form
// "logger:<target>?<options>" and reconfigures the package-level logger
// accordingly. Recognized targets: syslog, eventlog, stdout, stderr. The
// query option "json=true" additionally switches to JSON output for any
// target.
func (f logFormatFlag) Set(format string) error {
	u, err := url.Parse(format)
	if err != nil {
		return err
	}
	if u.Scheme != "logger" {
		return fmt.Errorf("invalid scheme %s", u.Scheme)
	}
	jsonq := u.Query().Get("json")
	if jsonq == "true" {
		setJSONFormatter()
	}

	switch u.Opaque {
	case "syslog":
		// setSyslogFormatter is non-nil only on platforms with syslog support.
		if setSyslogFormatter == nil {
			return fmt.Errorf("system does not support syslog")
		}
		appname := u.Query().Get("appname")
		facility := u.Query().Get("local")
		return setSyslogFormatter(appname, facility)
	case "eventlog":
		// setEventlogFormatter is non-nil only on Windows.
		if setEventlogFormatter == nil {
			return fmt.Errorf("system does not support eventlog")
		}
		name := u.Query().Get("name")
		debugAsInfo := false
		debugAsInfoRaw := u.Query().Get("debugAsInfo")
		// An unparsable/absent debugAsInfo silently defaults to false.
		if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
			debugAsInfo = parsedDebugAsInfo
		}
		return setEventlogFormatter(name, debugAsInfo)
	case "stdout":
		origLogger.Out = os.Stdout
	case "stderr":
		origLogger.Out = os.Stderr
	default:
		return fmt.Errorf("unsupported logger %q", u.Opaque)
	}
	return nil
}
+
// init registers this package's flags on the default flag set.
func init() {
	AddFlags(flag.CommandLine)
}

// AddFlags adds the flags used by this package to the given FlagSet. That's
// useful if working with a custom FlagSet. The init function of this package
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
// flag.Parse() to make the logging flags take effect.
func AddFlags(fs *flag.FlagSet) {
	fs.Var(
		levelFlag(origLogger.Level.String()),
		"log.level",
		"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
	)
	fs.Var(
		logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
		"log.format",
		`Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
	)
}
+
// Logger is the interface for loggers used in the Prometheus components.
type Logger interface {
	Debug(...interface{})
	Debugln(...interface{})
	Debugf(string, ...interface{})

	Info(...interface{})
	Infoln(...interface{})
	Infof(string, ...interface{})

	Warn(...interface{})
	Warnln(...interface{})
	Warnf(string, ...interface{})

	Error(...interface{})
	Errorln(...interface{})
	Errorf(string, ...interface{})

	Fatal(...interface{})
	Fatalln(...interface{})
	Fatalf(string, ...interface{})

	// With returns a new Logger carrying the given key/value field.
	With(key string, value interface{}) Logger
}

// logger implements Logger by delegating to a logrus.Entry.
type logger struct {
	entry *logrus.Entry
}

// With returns a copy of the logger with the additional field attached; the
// receiver is left unchanged.
func (l logger) With(key string, value interface{}) Logger {
	return logger{l.entry.WithField(key, value)}
}
+
// Debug logs a message at level Debug on the standard logger.
func (l logger) Debug(args ...interface{}) {
	l.sourced().Debug(args...)
}

// Debugln logs a message at level Debug on the standard logger.
func (l logger) Debugln(args ...interface{}) {
	l.sourced().Debugln(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func (l logger) Debugf(format string, args ...interface{}) {
	l.sourced().Debugf(format, args...)
}

// Info logs a message at level Info on the standard logger.
func (l logger) Info(args ...interface{}) {
	l.sourced().Info(args...)
}

// Infoln logs a message at level Info on the standard logger.
func (l logger) Infoln(args ...interface{}) {
	l.sourced().Infoln(args...)
}

// Infof logs a message at level Info on the standard logger.
func (l logger) Infof(format string, args ...interface{}) {
	l.sourced().Infof(format, args...)
}

// Warn logs a message at level Warn on the standard logger.
func (l logger) Warn(args ...interface{}) {
	l.sourced().Warn(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func (l logger) Warnln(args ...interface{}) {
	l.sourced().Warnln(args...)
}

// Warnf logs a message at level Warn on the standard logger.
func (l logger) Warnf(format string, args ...interface{}) {
	l.sourced().Warnf(format, args...)
}

// Error logs a message at level Error on the standard logger.
func (l logger) Error(args ...interface{}) {
	l.sourced().Error(args...)
}

// Errorln logs a message at level Error on the standard logger.
func (l logger) Errorln(args ...interface{}) {
	l.sourced().Errorln(args...)
}

// Errorf logs a message at level Error on the standard logger.
func (l logger) Errorf(format string, args ...interface{}) {
	l.sourced().Errorf(format, args...)
}

// Fatal logs a message at level Fatal on the standard logger; per logrus
// semantics the process then exits.
func (l logger) Fatal(args ...interface{}) {
	l.sourced().Fatal(args...)
}

// Fatalln logs a message at level Fatal on the standard logger; per logrus
// semantics the process then exits.
func (l logger) Fatalln(args ...interface{}) {
	l.sourced().Fatalln(args...)
}

// Fatalf logs a message at level Fatal on the standard logger; per logrus
// semantics the process then exits.
func (l logger) Fatalf(format string, args ...interface{}) {
	l.sourced().Fatalf(format, args...)
}
+
// sourced adds a source field to the logger that contains
// the file name and line where the logging happened.
// The skip level of 2 assumes sourced() is called directly from one of the
// logging methods in this file, which in turn was called by user code; adding
// intermediate call layers would misattribute the source.
func (l logger) sourced() *logrus.Entry {
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = "<???>"
		line = 1
	} else {
		// Keep only the bare file name to keep log lines short.
		slash := strings.LastIndex(file, "/")
		file = file[slash+1:]
	}
	return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
}
+
// origLogger is the underlying logrus logger all package-level helpers and
// the formatter hooks in this package write to and reconfigure.
var origLogger = logrus.New()

// baseLogger wraps origLogger in this package's Logger interface.
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}

// Base returns the default Logger, which logs to logrus's default output
// (stderr, per logrus.New) until reconfigured via the log.format flag.
func Base() Logger {
	return baseLogger
}
+
// NewLogger returns a new Logger logging to w. The new logger is independent
// of the package-level base logger and its flags.
func NewLogger(w io.Writer) Logger {
	l := logrus.New()
	l.Out = w
	return logger{entry: logrus.NewEntry(l)}
}

// NewNopLogger returns a logger that discards all log messages.
func NewNopLogger() Logger {
	l := logrus.New()
	l.Out = ioutil.Discard
	return logger{entry: logrus.NewEntry(l)}
}

// With adds a field to the base logger and returns the resulting Logger.
func With(key string, value interface{}) Logger {
	return baseLogger.With(key, value)
}
+
// The package-level helpers below mirror the Logger interface on the base
// logger. They call baseLogger.sourced() directly so the caller-depth used by
// sourced() attributes the log line to user code, not this package.

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	baseLogger.sourced().Debug(args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	baseLogger.sourced().Debugln(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	baseLogger.sourced().Debugf(format, args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	baseLogger.sourced().Info(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	baseLogger.sourced().Infoln(args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	baseLogger.sourced().Infof(format, args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	baseLogger.sourced().Warn(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	baseLogger.sourced().Warnln(args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	baseLogger.sourced().Warnf(format, args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	baseLogger.sourced().Error(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	baseLogger.sourced().Errorln(args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	baseLogger.sourced().Errorf(format, args...)
}

// Fatal logs a message at level Fatal on the standard logger; per logrus
// semantics the process then exits.
func Fatal(args ...interface{}) {
	baseLogger.sourced().Fatal(args...)
}

// Fatalln logs a message at level Fatal on the standard logger; per logrus
// semantics the process then exits.
func Fatalln(args ...interface{}) {
	baseLogger.sourced().Fatalln(args...)
}

// Fatalf logs a message at level Fatal on the standard logger; per logrus
// semantics the process then exits.
func Fatalf(format string, args ...interface{}) {
	baseLogger.sourced().Fatalf(format, args...)
}
+
// errorLogWriter adapts the base logger to io.Writer; each Write is emitted
// as a single error-level log entry.
type errorLogWriter struct{}

// Write implements io.Writer. It always reports the full length as written.
func (errorLogWriter) Write(b []byte) (int, error) {
	baseLogger.sourced().Error(string(b))
	return len(b), nil
}

// NewErrorLogger returns a log.Logger that is meant to be used
// in the ErrorLog field of an http.Server to log HTTP server errors.
func NewErrorLogger() *log.Logger {
	return log.New(&errorLogWriter{}, "", 0)
}
diff --git a/vendor/github.com/prometheus/common/log/log_test.go b/vendor/github.com/prometheus/common/log/log_test.go
new file mode 100644
index 000000000..953adb79c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/log_test.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "bytes"
+ "regexp"
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+)
+
// TestFileLineLogging verifies that the package-level helpers honor the
// default info level and attach a "source" field pointing at the caller's
// file and line.
func TestFileLineLogging(t *testing.T) {
	var buf bytes.Buffer
	origLogger.Out = &buf
	origLogger.Formatter = &logrus.TextFormatter{
		DisableColors: true,
	}

	// The default logging level should be "info".
	Debug("This debug-level line should not show up in the output.")
	Infof("This %s-level line should show up in the output.", "info")

	// NOTE(review): the expected source line (log_test.go:33) is hard-coded;
	// inserting or removing lines above the Infof call breaks this test.
	re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33" \n$`
	if !regexp.MustCompile(re).Match(buf.Bytes()) {
		t.Fatalf("%q did not match expected regex %q", buf.String(), re)
	}
}
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
new file mode 100644
index 000000000..fd8c6fbee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -0,0 +1,119 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+ "fmt"
+ "log/syslog"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
// init wires up the package-level setSyslogFormatter hook (declared in
// log.go) so that -log.format=logger:syslog?appname=...&local=... can route
// log output to syslog. Not compiled on windows, nacl or plan9 (see the
// build tag above), where the hook stays nil.
func init() {
	setSyslogFormatter = func(appname, local string) error {
		if appname == "" {
			return fmt.Errorf("missing appname parameter")
		}
		if local == "" {
			return fmt.Errorf("missing local parameter")
		}

		fmter, err := newSyslogger(appname, local, origLogger.Formatter)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
			origLogger.Errorf("can't connect logger to syslog: %v", err)
			return err
		}
		// Wrap the previous formatter so each formatted entry is also
		// mirrored to syslog.
		origLogger.Formatter = fmter
		return nil
	}
}
+
// ceeTag is prepended to every line sent to syslog; "@cee:" marks the payload
// as structured data for daemons that understand the CEE cookie convention.
var ceeTag = []byte("@cee:")

// syslogger is a logrus.Formatter that delegates formatting to a wrapped
// formatter and mirrors each formatted entry to syslog.
type syslogger struct {
	wrap logrus.Formatter // formatter whose output is returned unchanged
	out  *syslog.Writer   // destination syslog connection
}

// newSyslogger connects to syslog with the given appname tag and local
// facility number (see getFacility) and wraps fmter.
// NOTE(review): when syslog.New fails, a non-nil *syslogger with a nil out
// field is returned alongside the error — callers must check err before use.
func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) {
	priority, err := getFacility(facility)
	if err != nil {
		return nil, err
	}
	out, err := syslog.New(priority, appname)
	return &syslogger{
		out:  out,
		wrap: fmter,
	}, err
}
+
// localFacilities maps the "local" query parameter value to its syslog
// facility priority.
var localFacilities = map[string]syslog.Priority{
	"0": syslog.LOG_LOCAL0,
	"1": syslog.LOG_LOCAL1,
	"2": syslog.LOG_LOCAL2,
	"3": syslog.LOG_LOCAL3,
	"4": syslog.LOG_LOCAL4,
	"5": syslog.LOG_LOCAL5,
	"6": syslog.LOG_LOCAL6,
	"7": syslog.LOG_LOCAL7,
}

// getFacility translates a facility number ("0" through "7") into the
// corresponding LOG_LOCALn syslog priority. Any other input yields an error
// (with LOG_LOCAL0 as the placeholder value).
func getFacility(facility string) (syslog.Priority, error) {
	if priority, ok := localFacilities[facility]; ok {
		return priority, nil
	}
	return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility)
}
+
// Format implements logrus.Formatter. It delegates formatting to the wrapped
// formatter and, as a side effect, sends the formatted entry (prefixed with
// the @cee: cookie) to syslog at a severity matching the logrus level.
// Failures to reach syslog are reported on stderr and returned.
func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
	data, err := s.wrap.Format(e)
	if err != nil {
		fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err)
		return data, err
	}
	// only append tag to data sent to syslog (line), not to what
	// is returned
	// NOTE(review): append on the shared ceeTag slice is only safe while
	// cap(ceeTag) == len(ceeTag); confirm concurrent Format calls cannot
	// share the backing array.
	line := string(append(ceeTag, data...))

	switch e.Level {
	case logrus.PanicLevel:
		err = s.out.Crit(line)
	case logrus.FatalLevel:
		err = s.out.Crit(line)
	case logrus.ErrorLevel:
		err = s.out.Err(line)
	case logrus.WarnLevel:
		err = s.out.Warning(line)
	case logrus.InfoLevel:
		err = s.out.Info(line)
	case logrus.DebugLevel:
		err = s.out.Debug(line)
	default:
		err = s.out.Notice(line)
	}

	if err != nil {
		fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err)
	}

	return data, err
}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 000000000..35e739c7a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
// AlertStatus denotes whether an alert is currently firing or has resolved.
type AlertStatus string

const (
	// AlertFiring is the status of an alert whose activity interval has not
	// yet ended.
	AlertFiring AlertStatus = "firing"
	// AlertResolved is the status of an alert whose activity interval ended
	// in the past.
	AlertResolved AlertStatus = "resolved"
)
+
// Alert is a generic representation of an alert in the Prometheus eco-system.
type Alert struct {
	// Label value pairs for purpose of aggregation, matching, and disposition
	// dispatching. This must minimally include an "alertname" label.
	Labels LabelSet `json:"labels"`

	// Extra key/value information which does not define alert identity.
	Annotations LabelSet `json:"annotations"`

	// The known time range for this alert. Both ends are optional. A zero
	// EndsAt means the alert is still active (see ResolvedAt).
	StartsAt time.Time `json:"startsAt,omitempty"`
	EndsAt   time.Time `json:"endsAt,omitempty"`

	// GeneratorURL identifies the entity that raised this alert.
	GeneratorURL string `json:"generatorURL"`
}
+
// Name returns the name of the alert. It is equivalent to the "alertname" label.
func (a *Alert) Name() string {
	return string(a.Labels[AlertNameLabel])
}

// Fingerprint returns a unique hash for the alert. It is equivalent to
// the fingerprint of the alert's label set.
func (a *Alert) Fingerprint() Fingerprint {
	return a.Labels.Fingerprint()
}

// String returns a short human-readable identifier built from the alert
// name, the first seven hex digits of its fingerprint, and its state.
func (a *Alert) String() string {
	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
	if a.Resolved() {
		return s + "[resolved]"
	}
	return s + "[active]"
}

// Resolved returns true iff the activity interval ended in the past.
func (a *Alert) Resolved() bool {
	return a.ResolvedAt(time.Now())
}

// ResolvedAt returns true iff the activity interval ended at or before
// the given timestamp. A zero EndsAt means the interval is still open.
func (a *Alert) ResolvedAt(ts time.Time) bool {
	if a.EndsAt.IsZero() {
		return false
	}
	return !a.EndsAt.After(ts)
}

// Status returns the status of the alert.
func (a *Alert) Status() AlertStatus {
	if a.Resolved() {
		return AlertResolved
	}
	return AlertFiring
}
+
// Validate checks whether the alert data is inconsistent. It requires a
// non-zero start time, an end time (if set) not before the start, at least
// one valid label pair, and valid annotations.
func (a *Alert) Validate() error {
	if a.StartsAt.IsZero() {
		return fmt.Errorf("start time missing")
	}
	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
		return fmt.Errorf("start time must be before end time")
	}
	// Label validity is checked before emptiness, so a set containing only
	// invalid labels reports the more specific error.
	if err := a.Labels.Validate(); err != nil {
		return fmt.Errorf("invalid label set: %s", err)
	}
	if len(a.Labels) == 0 {
		return fmt.Errorf("at least one label pair required")
	}
	if err := a.Annotations.Validate(); err != nil {
		return fmt.Errorf("invalid annotations: %s", err)
	}
	return nil
}
+
// Alerts is a list of alerts that can be sorted in chronological order.
type Alerts []*Alert

// Len implements sort.Interface.
func (as Alerts) Len() int { return len(as) }

// Swap implements sort.Interface.
func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }

// Less implements sort.Interface; alerts order by start time, then end time,
// then fingerprint.
// NOTE(review): this is not a strict weak ordering — when i starts after j
// but ends before it, both Less(i, j) and Less(j, i) can be true, so sort
// results for such inputs are implementation-dependent. Confirm against
// upstream before relying on a total order.
func (as Alerts) Less(i, j int) bool {
	if as[i].StartsAt.Before(as[j].StartsAt) {
		return true
	}
	if as[i].EndsAt.Before(as[j].EndsAt) {
		return true
	}
	return as[i].Fingerprint() < as[j].Fingerprint()
}

// HasFiring returns true iff one of the alerts is not resolved.
func (as Alerts) HasFiring() bool {
	for _, a := range as {
		if !a.Resolved() {
			return true
		}
	}
	return false
}

// Status returns AlertFiring iff at least one of the alerts is firing.
func (as Alerts) Status() AlertStatus {
	if as.HasFiring() {
		return AlertFiring
	}
	return AlertResolved
}
diff --git a/vendor/github.com/prometheus/common/model/alert_test.go b/vendor/github.com/prometheus/common/model/alert_test.go
new file mode 100644
index 000000000..9692bca21
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert_test.go
@@ -0,0 +1,118 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "strings"
+ "testing"
+ "time"
+)
+
// TestAlertValidate exercises Alert.Validate against a table of valid and
// invalid alerts; for invalid cases the returned error must contain the
// expected substring.
func TestAlertValidate(t *testing.T) {
	ts := time.Now()

	var cases = []struct {
		alert *Alert
		err   string // expected error substring; empty means no error expected
	}{
		{
			alert: &Alert{
				Labels:   LabelSet{"a": "b"},
				StartsAt: ts,
			},
		},
		{
			alert: &Alert{
				Labels: LabelSet{"a": "b"},
			},
			err: "start time missing",
		},
		{
			alert: &Alert{
				Labels:   LabelSet{"a": "b"},
				StartsAt: ts,
				EndsAt:   ts,
			},
		},
		{
			alert: &Alert{
				Labels:   LabelSet{"a": "b"},
				StartsAt: ts,
				EndsAt:   ts.Add(1 * time.Minute),
			},
		},
		{
			alert: &Alert{
				Labels:   LabelSet{"a": "b"},
				StartsAt: ts,
				EndsAt:   ts.Add(-1 * time.Minute),
			},
			err: "start time must be before end time",
		},
		{
			alert: &Alert{
				StartsAt: ts,
			},
			err: "at least one label pair required",
		},
		{
			alert: &Alert{
				Labels:   LabelSet{"a": "b", "!bad": "label"},
				StartsAt: ts,
			},
			err: "invalid label set: invalid name",
		},
		{
			alert: &Alert{
				Labels:   LabelSet{"a": "b", "bad": "\xfflabel"},
				StartsAt: ts,
			},
			err: "invalid label set: invalid value",
		},
		{
			alert: &Alert{
				Labels:      LabelSet{"a": "b"},
				Annotations: LabelSet{"!bad": "label"},
				StartsAt:    ts,
			},
			err: "invalid annotations: invalid name",
		},
		{
			alert: &Alert{
				Labels:      LabelSet{"a": "b"},
				Annotations: LabelSet{"bad": "\xfflabel"},
				StartsAt:    ts,
			},
			err: "invalid annotations: invalid value",
		},
	}

	for i, c := range cases {
		err := c.alert.Validate()
		// No error returned: pass only if none was expected.
		if err == nil {
			if c.err == "" {
				continue
			}
			t.Errorf("%d. Expected error %q but got none", i, c.err)
			continue
		}
		if c.err == "" && err != nil {
			t.Errorf("%d. Expected no error but got %q", i, err)
			continue
		}
		// Substring match keeps the table robust to wrapped error prefixes.
		if !strings.Contains(err.Error(), c.err) {
			t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
		}
	}
}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 000000000..fc4de4106
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
// Fingerprint provides a hash-capable representation of a Metric.
// For our purposes, FNV-1A 64-bit is used.
type Fingerprint uint64

// FingerprintFromString transforms a string representation into a
// Fingerprint. The parse error, if any, is returned alongside the converted
// value.
func FingerprintFromString(s string) (Fingerprint, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	return Fingerprint(v), err
}

// ParseFingerprint parses the input string into a fingerprint. In contrast
// to FingerprintFromString, it guarantees a zero Fingerprint on error.
func ParseFingerprint(s string) (Fingerprint, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		return 0, err
	}
	return Fingerprint(v), nil
}

// String renders the fingerprint as 16 zero-padded, lower-case hex digits.
func (f Fingerprint) String() string {
	return fmt.Sprintf("%016x", uint64(f))
}

// Fingerprints represents a collection of Fingerprint subject to a given
// natural sorting scheme. It implements sort.Interface.
type Fingerprints []Fingerprint

// Len implements sort.Interface.
func (f Fingerprints) Len() int { return len(f) }

// Less implements sort.Interface.
func (f Fingerprints) Less(i, j int) bool { return f[i] < f[j] }

// Swap implements sort.Interface.
func (f Fingerprints) Swap(i, j int) { f[i], f[j] = f[j], f[i] }

// FingerprintSet is a set of Fingerprints.
type FingerprintSet map[Fingerprint]struct{}

// Equal returns true if both sets contain the same elements (and not more).
func (s FingerprintSet) Equal(o FingerprintSet) bool {
	if len(s) != len(o) {
		return false
	}
	for fp := range s {
		if _, present := o[fp]; !present {
			return false
		}
	}
	return true
}

// Intersection returns the elements contained in both sets.
func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
	result := FingerprintSet{}

	// Iterate over the smaller set and probe the larger one.
	small, large := s, o
	if len(o) < len(s) {
		small, large = o, s
	}
	for fp := range small {
		if _, ok := large[fp]; ok {
			result[fp] = struct{}{}
		}
	}
	return result
}
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 000000000..038fc1c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
// Inline and byte-free variant of hash/fnv's fnv64a.

// FNV-1a 64-bit parameters.
const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
	return offset64
}

// hashAdd folds each byte of s into the fnv64a hash h and returns the
// updated hash.
func hashAdd(h uint64, s string) uint64 {
	for _, b := range []byte(s) {
		h = (h ^ uint64(b)) * prime64
	}
	return h
}

// hashAddByte folds a single byte into the fnv64a hash h and returns the
// updated hash.
func hashAddByte(h uint64, b byte) uint64 {
	return (h ^ uint64(b)) * prime64
}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 000000000..41051a01a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
const (
	// AlertNameLabel is the name of the label containing an alert's name.
	AlertNameLabel = "alertname"

	// ExportedLabelPrefix is the prefix to prepend to the label names present in
	// exported metrics if a label of the same name is added by the server.
	ExportedLabelPrefix = "exported_"

	// MetricNameLabel is the label name indicating the metric name of a
	// timeseries.
	MetricNameLabel = "__name__"

	// SchemeLabel is the name of the label that holds the scheme on which to
	// scrape a target.
	SchemeLabel = "__scheme__"

	// AddressLabel is the name of the label that holds the address of
	// a scrape target.
	AddressLabel = "__address__"

	// MetricsPathLabel is the name of the label that holds the path on which to
	// scrape a target.
	MetricsPathLabel = "__metrics_path__"

	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
	// label names.
	ReservedLabelPrefix = "__"

	// MetaLabelPrefix is a prefix for labels that provide meta information.
	// Labels with this prefix are used for intermediate label processing and
	// will not be attached to time series.
	MetaLabelPrefix = "__meta_"

	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
	// Labels with this prefix are used for intermediate label processing and
	// will not be attached to time series. This is reserved for use in
	// Prometheus configuration files by users.
	TmpLabelPrefix = "__tmp_"

	// ParamLabelPrefix is a prefix for labels that provide URL parameters
	// used to scrape a target.
	ParamLabelPrefix = "__param_"

	// JobLabel is the label name indicating the job from which a timeseries
	// was scraped.
	JobLabel = "job"

	// InstanceLabel is the label name used for the instance label.
	InstanceLabel = "instance"

	// BucketLabel is used for the label that defines the upper bound of a
	// bucket of a histogram ("le" -> "less or equal").
	BucketLabel = "le"

	// QuantileLabel is used for the label that defines the quantile in a
	// summary.
	QuantileLabel = "quantile"
)
+
// LabelNameRE is a regular expression matching valid label names. Note that
// the IsValid method of LabelName performs the same check but faster than a
// match with this regular expression.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")

// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string

// IsValid is true iff the label name matches the pattern of LabelNameRE. This
// method, however, does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
func (ln LabelName) IsValid() bool {
	if len(ln) == 0 {
		return false
	}
	for i, r := range ln {
		switch {
		case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r == '_':
			// Letters and underscores are allowed anywhere.
		case r >= '0' && r <= '9':
			// Digits are allowed anywhere except the first position.
			if i == 0 {
				return false
			}
		default:
			return false
		}
	}
	return true
}

// UnmarshalYAML implements the yaml.Unmarshaler interface. It rejects
// strings that are not valid label names.
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	name := LabelName(raw)
	if !name.IsValid() {
		return fmt.Errorf("%q is not a valid label name", raw)
	}
	*ln = name
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface. It rejects
// strings that are not valid label names.
func (ln *LabelName) UnmarshalJSON(b []byte) error {
	var raw string
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	name := LabelName(raw)
	if !name.IsValid() {
		return fmt.Errorf("%q is not a valid label name", raw)
	}
	*ln = name
	return nil
}

// LabelNames is a sortable LabelName slice. It implements sort.Interface.
type LabelNames []LabelName

// Len implements sort.Interface.
func (l LabelNames) Len() int { return len(l) }

// Less implements sort.Interface.
func (l LabelNames) Less(i, j int) bool { return l[i] < l[j] }

// Swap implements sort.Interface.
func (l LabelNames) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// String joins the label names with ", " for human-readable output.
func (l LabelNames) String() string {
	parts := make([]string, len(l))
	for i, name := range l {
		parts[i] = string(name)
	}
	return strings.Join(parts, ", ")
}

// A LabelValue is an associated value for a LabelName.
type LabelValue string

// IsValid returns true iff the string is valid UTF-8.
func (lv LabelValue) IsValid() bool {
	return utf8.ValidString(string(lv))
}

// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
type LabelValues []LabelValue

// Len implements sort.Interface.
func (l LabelValues) Len() int { return len(l) }

// Less implements sort.Interface.
func (l LabelValues) Less(i, j int) bool { return string(l[i]) < string(l[j]) }

// Swap implements sort.Interface.
func (l LabelValues) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// LabelPair pairs a name with a value.
type LabelPair struct {
	Name  LabelName
	Value LabelValue
}

// LabelPairs is a sortable slice of LabelPair pointers. It implements
// sort.Interface, ordering primarily by Name and secondarily by Value.
type LabelPairs []*LabelPair

// Len implements sort.Interface.
func (l LabelPairs) Len() int { return len(l) }

// Less implements sort.Interface.
func (l LabelPairs) Less(i, j int) bool {
	if l[i].Name != l[j].Name {
		return l[i].Name < l[j].Name
	}
	return l[i].Value < l[j].Value
}

// Swap implements sort.Interface.
func (l LabelPairs) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go
new file mode 100644
index 000000000..e8df28ffa
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels_test.go
@@ -0,0 +1,140 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+ "testing"
+)
+
+func testLabelNames(t testing.TB) {
+ var scenarios = []struct {
+ in LabelNames
+ out LabelNames
+ }{
+ {
+ in: LabelNames{"ZZZ", "zzz"},
+ out: LabelNames{"ZZZ", "zzz"},
+ },
+ {
+ in: LabelNames{"aaa", "AAA"},
+ out: LabelNames{"AAA", "aaa"},
+ },
+ }
+
+ for i, scenario := range scenarios {
+ sort.Sort(scenario.in)
+
+ for j, expected := range scenario.out {
+ if expected != scenario.in[j] {
+ t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
+ }
+ }
+ }
+}
+
+func TestLabelNames(t *testing.T) {
+ testLabelNames(t)
+}
+
+func BenchmarkLabelNames(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testLabelNames(b)
+ }
+}
+
+func testLabelValues(t testing.TB) {
+ var scenarios = []struct {
+ in LabelValues
+ out LabelValues
+ }{
+ {
+ in: LabelValues{"ZZZ", "zzz"},
+ out: LabelValues{"ZZZ", "zzz"},
+ },
+ {
+ in: LabelValues{"aaa", "AAA"},
+ out: LabelValues{"AAA", "aaa"},
+ },
+ }
+
+ for i, scenario := range scenarios {
+ sort.Sort(scenario.in)
+
+ for j, expected := range scenario.out {
+ if expected != scenario.in[j] {
+ t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
+ }
+ }
+ }
+}
+
+func TestLabelValues(t *testing.T) {
+ testLabelValues(t)
+}
+
+func BenchmarkLabelValues(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testLabelValues(b)
+ }
+}
+
+func TestLabelNameIsValid(t *testing.T) {
+ var scenarios = []struct {
+ ln LabelName
+ valid bool
+ }{
+ {
+ ln: "Avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "_Avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "1valid_23name",
+ valid: false,
+ },
+ {
+ ln: "avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "Ava:lid_23name",
+ valid: false,
+ },
+ {
+ ln: "a lid_23name",
+ valid: false,
+ },
+ {
+ ln: ":leading_colon",
+ valid: false,
+ },
+ {
+ ln: "colon:in:the:middle",
+ valid: false,
+ },
+ }
+
+ for _, s := range scenarios {
+ if s.ln.IsValid() != s.valid {
+ t.Errorf("Expected %v for %q using IsValid method", s.valid, s.ln)
+ }
+ if LabelNameRE.MatchString(string(s.ln)) != s.valid {
+ t.Errorf("Expected %v for %q using regexp match", s.valid, s.ln)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 000000000..6eda08a73
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the metrics, using the following criteria:
+//
+// If m has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in m, then m is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If m and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 000000000..9dff899cb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,103 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go
new file mode 100644
index 000000000..06f9de525
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric_test.go
@@ -0,0 +1,132 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import "testing"
+
+func testMetric(t testing.TB) {
+ var scenarios = []struct {
+ input LabelSet
+ fingerprint Fingerprint
+ fastFingerprint Fingerprint
+ }{
+ {
+ input: LabelSet{},
+ fingerprint: 14695981039346656037,
+ fastFingerprint: 14695981039346656037,
+ },
+ {
+ input: LabelSet{
+ "first_name": "electro",
+ "occupation": "robot",
+ "manufacturer": "westinghouse",
+ },
+ fingerprint: 5911716720268894962,
+ fastFingerprint: 11310079640881077873,
+ },
+ {
+ input: LabelSet{
+ "x": "y",
+ },
+ fingerprint: 8241431561484471700,
+ fastFingerprint: 13948396922932177635,
+ },
+ {
+ input: LabelSet{
+ "a": "bb",
+ "b": "c",
+ },
+ fingerprint: 3016285359649981711,
+ fastFingerprint: 3198632812309449502,
+ },
+ {
+ input: LabelSet{
+ "a": "b",
+ "bb": "c",
+ },
+ fingerprint: 7122421792099404749,
+ fastFingerprint: 5774953389407657638,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ input := Metric(scenario.input)
+
+ if scenario.fingerprint != input.Fingerprint() {
+ t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint())
+ }
+ if scenario.fastFingerprint != input.FastFingerprint() {
+ t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint())
+ }
+ }
+}
+
+func TestMetric(t *testing.T) {
+ testMetric(t)
+}
+
+func BenchmarkMetric(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testMetric(b)
+ }
+}
+
+func TestMetricNameIsValid(t *testing.T) {
+ var scenarios = []struct {
+ mn LabelValue
+ valid bool
+ }{
+ {
+ mn: "Avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "_Avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "1valid_23name",
+ valid: false,
+ },
+ {
+ mn: "avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "Ava:lid_23name",
+ valid: true,
+ },
+ {
+ mn: "a lid_23name",
+ valid: false,
+ },
+ {
+ mn: ":leading_colon",
+ valid: true,
+ },
+ {
+ mn: "colon:in:the:middle",
+ valid: true,
+ },
+ }
+
+ for _, s := range scenarios {
+ if IsValidMetricName(s.mn) != s.valid {
+ t.Errorf("Expected %v for %q using IsValidMetricName function", s.valid, s.mn)
+ }
+ if MetricNameRE.MatchString(string(s.mn)) != s.valid {
+ t.Errorf("Expected %v for %q using regexp matching", s.valid, s.mn)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 000000000..a7b969170
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 000000000..8762b13c6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// create hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/signature_test.go b/vendor/github.com/prometheus/common/model/signature_test.go
new file mode 100644
index 000000000..d59c8a8c3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature_test.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+func TestLabelsToSignature(t *testing.T) {
+ var scenarios = []struct {
+ in map[string]string
+ out uint64
+ }{
+ {
+ in: map[string]string{},
+ out: 14695981039346656037,
+ },
+ {
+ in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := LabelsToSignature(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestMetricToFingerprint(t *testing.T) {
+ var scenarios = []struct {
+ in LabelSet
+ out Fingerprint
+ }{
+ {
+ in: LabelSet{},
+ out: 14695981039346656037,
+ },
+ {
+ in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := labelSetToFingerprint(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestMetricToFastFingerprint(t *testing.T) {
+ var scenarios = []struct {
+ in LabelSet
+ out Fingerprint
+ }{
+ {
+ in: LabelSet{},
+ out: 14695981039346656037,
+ },
+ {
+ in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 12952432476264840823,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := labelSetToFastFingerprint(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestSignatureForLabels(t *testing.T) {
+ var scenarios = []struct {
+ in Metric
+ labels LabelNames
+ out uint64
+ }{
+ {
+ in: Metric{},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{},
+ labels: LabelNames{"empty"},
+ out: 7187873163539638612,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{"empty"},
+ out: 7187873163539638612,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{"fear", "name"},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
+ labels: LabelNames{"fear", "name"},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{},
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := SignatureForLabels(scenario.in, scenario.labels...)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestSignatureWithoutLabels(t *testing.T) {
+ var scenarios = []struct {
+ in Metric
+ labels map[LabelName]struct{}
+ out uint64
+ }{
+ {
+ in: Metric{},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}},
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
+ labels: map[LabelName]struct{}{"foo": struct{}{}},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: map[LabelName]struct{}{},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: nil,
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := SignatureWithoutLabels(scenario.in, scenario.labels)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
+ for i := 0; i < b.N; i++ {
+ if a := LabelsToSignature(l); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
+ }
+ }
+}
+
+func BenchmarkLabelToSignatureScalar(b *testing.B) {
+ benchmarkLabelToSignature(b, nil, 14695981039346656037)
+}
+
+func BenchmarkLabelToSignatureSingle(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169)
+}
+
+func BenchmarkLabelToSignatureDouble(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
+}
+
+func BenchmarkLabelToSignatureTriple(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
+}
+
+func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
+ for i := 0; i < b.N; i++ {
+ if a := labelSetToFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+}
+
+func BenchmarkMetricToFingerprintScalar(b *testing.B) {
+ benchmarkMetricToFingerprint(b, nil, 14695981039346656037)
+}
+
+func BenchmarkMetricToFingerprintSingle(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169)
+}
+
+func BenchmarkMetricToFingerprintDouble(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
+}
+
+func BenchmarkMetricToFingerprintTriple(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
+}
+
+func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
+ for i := 0; i < b.N; i++ {
+ if a := labelSetToFastFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+}
+
+func BenchmarkMetricToFastFingerprintScalar(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)
+}
+
+func BenchmarkMetricToFastFingerprintSingle(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964)
+}
+
+func BenchmarkMetricToFastFingerprintDouble(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
+}
+
+func BenchmarkMetricToFastFingerprintTriple(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
+}
+
+func BenchmarkEmptyLabelSignature(b *testing.B) {
+ input := []map[string]string{nil, {}}
+
+ var ms runtime.MemStats
+ runtime.ReadMemStats(&ms)
+
+ alloc := ms.Alloc
+
+ for _, labels := range input {
+ LabelsToSignature(labels)
+ }
+
+ runtime.ReadMemStats(&ms)
+
+ if got := ms.Alloc; alloc != got {
+ b.Fatal("expected LabelsToSignature with empty labels not to perform allocations")
+ }
+}
+
+func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) {
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ for i := 0; i < concLevel; i++ {
+ go func() {
+ start.Wait()
+ for j := b.N / concLevel; j >= 0; j-- {
+ if a := labelSetToFastFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+ end.Done()
+ }()
+ }
+ b.ResetTimer()
+ start.Done()
+ end.Wait()
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..7538e2997
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns true iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns true iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/silence_test.go b/vendor/github.com/prometheus/common/model/silence_test.go
new file mode 100644
index 000000000..8eaaf0744
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence_test.go
@@ -0,0 +1,228 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "strings"
+ "testing"
+ "time"
+)
+
+// TestMatcherValidate exercises Matcher.Validate with valid and invalid
+// name/value/regex combinations; expected errors are matched by substring.
+func TestMatcherValidate(t *testing.T) {
+	var cases = []struct {
+		matcher *Matcher
+		err     string
+	}{
+		{
+			matcher: &Matcher{
+				Name:  "name",
+				Value: "value",
+			},
+		},
+		{
+			matcher: &Matcher{
+				Name:    "name",
+				Value:   "value",
+				IsRegex: true,
+			},
+		},
+		{
+			matcher: &Matcher{
+				Name:  "name!",
+				Value: "value",
+			},
+			err: "invalid name",
+		},
+		{
+			matcher: &Matcher{
+				Name:  "",
+				Value: "value",
+			},
+			err: "invalid name",
+		},
+		{
+			matcher: &Matcher{
+				Name:  "name",
+				Value: "value\xff",
+			},
+			err: "invalid value",
+		},
+		{
+			matcher: &Matcher{
+				Name:  "name",
+				Value: "",
+			},
+			err: "invalid value",
+		},
+	}
+
+	for i, c := range cases {
+		err := c.matcher.Validate()
+		if err == nil {
+			if c.err == "" {
+				continue
+			}
+			t.Errorf("%d. Expected error %q but got none", i, c.err)
+			continue
+		}
+		if c.err == "" && err != nil {
+			t.Errorf("%d. Expected no error but got %q", i, err)
+			continue
+		}
+		if !strings.Contains(err.Error(), c.err) {
+			t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+		}
+	}
+}
+
+// TestSilenceValidate exercises Silence.Validate: each case omits or breaks
+// exactly one field and expects the corresponding error substring.
+func TestSilenceValidate(t *testing.T) {
+	ts := time.Now()
+
+	var cases = []struct {
+		sil *Silence
+		err string
+	}{
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts,
+				CreatedAt: ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+					{Name: "name", Value: "value"},
+					{Name: "name", Value: "value"},
+					{Name: "name", Value: "value", IsRegex: true},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts,
+				CreatedAt: ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts.Add(-1 * time.Minute),
+				CreatedAt: ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+			err: "start time must be before end time",
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				StartsAt:  ts,
+				CreatedAt: ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+			err: "end time missing",
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				EndsAt:    ts,
+				CreatedAt: ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+			err: "start time missing",
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "!name", Value: "value"},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts,
+				CreatedAt: ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+			err: "invalid matcher",
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts,
+				CreatedAt: ts,
+				CreatedBy: "name",
+			},
+			err: "comment missing",
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts,
+				CreatedBy: "name",
+				Comment:   "comment",
+			},
+			err: "creation timestamp missing",
+		},
+		{
+			sil: &Silence{
+				Matchers: []*Matcher{
+					{Name: "name", Value: "value"},
+				},
+				StartsAt:  ts,
+				EndsAt:    ts,
+				CreatedAt: ts,
+				Comment:   "comment",
+			},
+			err: "creator information missing",
+		},
+	}
+
+	for i, c := range cases {
+		err := c.sil.Validate()
+		if err == nil {
+			if c.err == "" {
+				continue
+			}
+			t.Errorf("%d. Expected error %q but got none", i, c.err)
+			continue
+		}
+		if c.err == "" && err != nil {
+			t.Errorf("%d. Expected no error but got %q", i, err)
+			continue
+		}
+		if !strings.Contains(err.Error(), c.err) {
+			t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..548968aeb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work
+	// (otherwise `second` below would be zero).
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// nanosPerTick is the number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds. Sub-tick (sub-millisecond) precision
+// is truncated by the integer division.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d. Durations finer than one tick are
+// truncated by the integer division.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// dotPrecision is the number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time as a decimal
+// number of seconds (e.g. "1234.567").
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface. The Time is
+// encoded as a bare JSON number of seconds, not as a quoted string.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface. The input is a
+// decimal number of seconds with optional fractional part, e.g. "1234.567".
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		// Normalize the fractional part to exactly dotPrecision digits,
+		// truncating excess precision and right-padding with zeros.
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] = p[1] + strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		// BUG FIX: for negative inputs, the fractional part must move the
+		// value further from zero (e.g. "-1.5" is -1500ms, not -500ms).
+		// Checking the string prefix also covers "-0.x", whose sign is
+		// lost when parsing the integer part.
+		if strings.HasPrefix(p[0], "-") {
+			va = -va
+		}
+		*t = Time(v + va)
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// durationRE matches a non-negative integer followed by exactly one unit.
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+	matches := durationRE.FindStringSubmatch(durationStr)
+	if len(matches) != 3 {
+		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+	}
+	var (
+		// The Atoi error can be ignored: the regexp admits digits only.
+		n, _ = strconv.Atoi(matches[1])
+		// Milliseconds are the base unit; the switch below scales up.
+		dur = time.Duration(n) * time.Millisecond
+	)
+	switch unit := matches[2]; unit {
+	case "y":
+		dur *= 1000 * 60 * 60 * 24 * 365
+	case "w":
+		dur *= 1000 * 60 * 60 * 24 * 7
+	case "d":
+		dur *= 1000 * 60 * 60 * 24
+	case "h":
+		dur *= 1000 * 60 * 60
+	case "m":
+		dur *= 1000 * 60
+	case "s":
+		dur *= 1000
+	case "ms":
+		// Value already correct
+	default:
+		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+	}
+	return Duration(dur), nil
+}
+
+// String formats the duration using the largest unit that divides it
+// evenly. NOTE(review): a zero duration is divisible by every factor and
+// therefore formats as "0y".
+func (d Duration) String() string {
+	var (
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		unit = "ms"
+	)
+	factors := map[string]int64{
+		"y":  1000 * 60 * 60 * 24 * 365,
+		"w":  1000 * 60 * 60 * 24 * 7,
+		"d":  1000 * 60 * 60 * 24,
+		"h":  1000 * 60 * 60,
+		"m":  1000 * 60,
+		"s":  1000,
+		"ms": 1,
+	}
+
+	// Pick the first (largest) unit by which ms is evenly divisible;
+	// the cases are ordered from largest to smallest unit.
+	switch int64(0) {
+	case ms % factors["y"]:
+		unit = "y"
+	case ms % factors["w"]:
+		unit = "w"
+	case ms % factors["d"]:
+		unit = "d"
+	case ms % factors["h"]:
+		unit = "h"
+	case ms % factors["m"]:
+		unit = "m"
+	case ms % factors["s"]:
+		unit = "s"
+	}
+	return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface. The duration is
+// encoded in the custom single-unit format produced by String.
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface. The input must
+// be a scalar string in the format accepted by ParseDuration.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go
new file mode 100644
index 000000000..45ffd872d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time_test.go
@@ -0,0 +1,129 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "testing"
+ "time"
+)
+
+// TestComparators covers Time.Equal, Before, and After for equal and
+// strictly ordered instants.
+func TestComparators(t *testing.T) {
+	t1a := TimeFromUnix(0)
+	t1b := TimeFromUnix(0)
+	t2 := TimeFromUnix(2*second - 1)
+
+	if !t1a.Equal(t1b) {
+		t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
+	}
+	if t1a.Equal(t2) {
+		t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
+	}
+
+	if !t1a.Before(t2) {
+		t.Fatalf("Expected %s to be before %s", t1a, t2)
+	}
+	if t1a.Before(t1b) {
+		t.Fatalf("Expected %s to not be before %s", t1a, t1b)
+	}
+
+	if !t2.After(t1a) {
+		t.Fatalf("Expected %s to be after %s", t2, t1a)
+	}
+	if t1b.After(t1a) {
+		t.Fatalf("Expected %s to not be after %s", t1b, t1a)
+	}
+}
+
+// TestTimeConversions checks the nanosecond round trip, including the
+// truncation of sub-tick (sub-millisecond) precision.
+func TestTimeConversions(t *testing.T) {
+	unixSecs := int64(1136239445)
+	unixNsecs := int64(123456789)
+	unixNano := unixSecs*1e9 + unixNsecs
+
+	t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
+	t2 := time.Unix(unixSecs, unixNsecs)
+
+	ts := TimeFromUnixNano(unixNano)
+	if !ts.Time().Equal(t1) {
+		t.Fatalf("Expected %s, got %s", t1, ts.Time())
+	}
+
+	// Test available precision.
+	ts = TimeFromUnixNano(t2.UnixNano())
+	if !ts.Time().Equal(t1) {
+		t.Fatalf("Expected %s, got %s", t1, ts.Time())
+	}
+
+	if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
+		t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano())
+	}
+}
+
+// TestDuration checks that Time.Add and Time.Sub agree with the
+// corresponding time.Time arithmetic.
+func TestDuration(t *testing.T) {
+	duration := time.Second + time.Minute + time.Hour
+	goTime := time.Unix(1136239445, 0)
+
+	ts := TimeFromUnix(goTime.Unix())
+	if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
+		t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
+	}
+
+	earlier := ts.Add(-duration)
+	delta := ts.Sub(earlier)
+	if delta != duration {
+		t.Fatalf("Expected %s to be equal to %s", delta, duration)
+	}
+}
+
+// TestParseDuration round-trips each supported unit through ParseDuration
+// and Duration.String.
+func TestParseDuration(t *testing.T) {
+	var cases = []struct {
+		in  string
+		out time.Duration
+	}{
+		{
+			in:  "324ms",
+			out: 324 * time.Millisecond,
+		}, {
+			in:  "3s",
+			out: 3 * time.Second,
+		}, {
+			in:  "5m",
+			out: 5 * time.Minute,
+		}, {
+			in:  "1h",
+			out: time.Hour,
+		}, {
+			in:  "4d",
+			out: 4 * 24 * time.Hour,
+		}, {
+			in:  "3w",
+			out: 3 * 7 * 24 * time.Hour,
+		}, {
+			in:  "10y",
+			out: 10 * 365 * 24 * time.Hour,
+		},
+	}
+
+	for _, c := range cases {
+		d, err := ParseDuration(c.in)
+		if err != nil {
+			t.Errorf("Unexpected error on input %q", c.in)
+		}
+		if time.Duration(d) != c.out {
+			t.Errorf("Expected %v but got %v", c.out, d)
+		}
+		if d.String() != c.in {
+			t.Errorf("Expected duration string %q but got %q", c.in, d.String())
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 000000000..7728abaee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,419 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which is possible to appear in a real SamplePair and thus not
+	// suitable to signal a non-existing SamplePair.
+	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+	// ZeroSample is the pseudo zero-value of Sample used to signal a
+	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which is possible to appear in a real Sample and thus not suitable
+	// to signal a non-existing Sample.
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time. It is stored as a float64.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler. The value is encoded as a quoted
+// decimal string (see String), which also accommodates Inf and NaN.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler. The input must be a quoted
+// string containing a float; a bare JSON number is rejected.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+// String formats the value with the minimal number of decimal digits.
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+// MarshalJSON implements json.Marshaler. The pair is encoded as a two-element
+// array: [<timestamp seconds>, "<value>"].
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler, expecting the array form
+// produced by MarshalJSON.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+	Metric    Metric      `json:"metric"`
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+	// BUG FIX: the value comparison was inverted — the original returned
+	// false when the values were EQUAL, so no two distinct samples with
+	// equal fields ever compared equal.
+	if !s.Value.Equal(o.Value) {
+		return false
+	}
+
+	return true
+}
+
+// String renders the sample as "<metric> => <value> @[<timestamp>]".
+func (s Sample) String() string {
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler. Value and Timestamp are folded
+// into a single "value" array field via SamplePair.
+func (s Sample) MarshalJSON() ([]byte, error) {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler. The scratch struct is seeded
+// with the receiver's current fields, so fields absent from the input
+// keep their previous values.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples element-wise and returns true if they
+// are equal. Order matters: permuted but otherwise equal slices differ.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+// (The comment previously referenced "COWMetric"; the field is a plain
+// Metric.)
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+// String renders the metric followed by one line per sample pair.
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+// ValueType enumerates the kinds of query result values.
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler, encoding the type as its
+// String form (e.g. "vector").
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler, accepting exactly the
+// strings produced by String.
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "<ValNone>":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+// String returns the canonical name of the value type. It panics on a
+// ValueType outside the declared constants.
+func (e ValueType) String() string {
+	switch e {
+	case ValNone:
+		return "<ValNone>"
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler, encoding the scalar as
+// [<timestamp>, "<value>"].
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler, expecting the array form
+// produced by MarshalJSON.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+	var f string
+	v := [...]interface{}{&s.Timestamp, &f}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	value, err := strconv.ParseFloat(f, 64)
+	if err != nil {
+		return fmt.Errorf("error parsing sample value: %s", err)
+	}
+	s.Value = SampleValue(value)
+	return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+	Value     string `json:"value"`
+	Timestamp Time   `json:"timestamp"`
+}
+
+func (s *String) String() string {
+	return s.Value
+}
+
+// MarshalJSON implements json.Marshaler, encoding the value as
+// [<timestamp>, <value>].
+func (s String) MarshalJSON() ([]byte, error) {
+	return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler, expecting the array form
+// produced by MarshalJSON.
+func (s *String) UnmarshalJSON(b []byte) error {
+	v := [...]interface{}{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Vector is basically only an alias for Samples, but the
+// contract is that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+	entries := make([]string, len(vec))
+	for i, s := range vec {
+		entries[i] = s.String()
+	}
+	return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int      { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+	switch {
+	case vec[i].Metric.Before(vec[j].Metric):
+		return true
+	case vec[j].Metric.Before(vec[i].Metric):
+		return false
+	case vec[i].Timestamp.Before(vec[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+// Equal compares two sets of samples element-wise and returns true if they
+// are equal. Order matters: permuted but otherwise equal vectors differ.
+func (vec Vector) Equal(o Vector) bool {
+	if len(vec) != len(o) {
+		return false
+	}
+
+	for i, sample := range vec {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int           { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
+
+// String renders the series sorted by metric, one series per line block.
+// The receiver itself is left unmodified; sorting happens on a copy.
+func (m Matrix) String() string {
+	sortedSeries := make(Matrix, len(m))
+	copy(sortedSeries, m)
+	sort.Sort(sortedSeries)
+
+	parts := make([]string, 0, len(sortedSeries))
+	for _, series := range sortedSeries {
+		parts = append(parts, series.String())
+	}
+
+	return strings.Join(parts, "\n")
+}
diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go
new file mode 100644
index 000000000..8d2b69ea1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_test.go
@@ -0,0 +1,417 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "math"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+// TestEqual covers SampleValue.Equal, including the deliberate deviation
+// from IEEE float semantics: two NaNs compare equal.
+func TestEqual(t *testing.T) {
+	tests := map[string]struct {
+		in1, in2 SampleValue
+		want     bool
+	}{
+		"equal floats": {
+			in1:  3.14,
+			in2:  3.14,
+			want: true,
+		},
+		"unequal floats": {
+			in1:  3.14,
+			in2:  3.1415,
+			want: false,
+		},
+		"positive inifinities": {
+			in1:  SampleValue(math.Inf(+1)),
+			in2:  SampleValue(math.Inf(+1)),
+			want: true,
+		},
+		"negative inifinities": {
+			in1:  SampleValue(math.Inf(-1)),
+			in2:  SampleValue(math.Inf(-1)),
+			want: true,
+		},
+		"different inifinities": {
+			in1:  SampleValue(math.Inf(+1)),
+			in2:  SampleValue(math.Inf(-1)),
+			want: false,
+		},
+		"number and infinity": {
+			in1:  42,
+			in2:  SampleValue(math.Inf(+1)),
+			want: false,
+		},
+		"number and NaN": {
+			in1:  42,
+			in2:  SampleValue(math.NaN()),
+			want: false,
+		},
+		"NaNs": {
+			in1:  SampleValue(math.NaN()),
+			in2:  SampleValue(math.NaN()),
+			want: true, // !!!
+		},
+	}
+
+	for name, test := range tests {
+		got := test.in1.Equal(test.in2)
+		if got != test.want {
+			t.Errorf("Comparing %s, %f and %f: got %t, want %t", name, test.in1, test.in2, got, test.want)
+		}
+	}
+}
+
+// TestSamplePairJSON round-trips a SamplePair through Marshal/Unmarshal
+// and checks the exact wire form [seconds, "value"].
+func TestSamplePairJSON(t *testing.T) {
+	input := []struct {
+		plain string
+		value SamplePair
+	}{
+		{
+			plain: `[1234.567,"123.1"]`,
+			value: SamplePair{
+				Value:     123.1,
+				Timestamp: 1234567,
+			},
+		},
+	}
+
+	for _, test := range input {
+		b, err := json.Marshal(test.value)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if string(b) != test.plain {
+			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+			continue
+		}
+
+		var sp SamplePair
+		err = json.Unmarshal(b, &sp)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if sp != test.value {
+			t.Errorf("decoding error: expected %v, got %v", test.value, sp)
+		}
+	}
+}
+
+// TestSampleJSON round-trips a Sample through Marshal/Unmarshal and checks
+// the exact wire form with the folded "value" array.
+func TestSampleJSON(t *testing.T) {
+	input := []struct {
+		plain string
+		value Sample
+	}{
+		{
+			plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`,
+			value: Sample{
+				Metric: Metric{
+					MetricNameLabel: "test_metric",
+				},
+				Value:     123.1,
+				Timestamp: 1234567,
+			},
+		},
+	}
+
+	for _, test := range input {
+		b, err := json.Marshal(test.value)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if string(b) != test.plain {
+			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+			continue
+		}
+
+		var sv Sample
+		err = json.Unmarshal(b, &sv)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if !reflect.DeepEqual(sv, test.value) {
+			t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+		}
+	}
+}
+
+// TestVectorJSON round-trips empty, single-sample, and multi-sample
+// vectors (including +Inf values) through Marshal/Unmarshal.
+func TestVectorJSON(t *testing.T) {
+	input := []struct {
+		plain string
+		value Vector
+	}{
+		{
+			plain: `[]`,
+			value: Vector{},
+		},
+		{
+			plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`,
+			value: Vector{&Sample{
+				Metric: Metric{
+					MetricNameLabel: "test_metric",
+				},
+				Value:     123.1,
+				Timestamp: 1234567,
+			}},
+		},
+		{
+			plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`,
+			value: Vector{
+				&Sample{
+					Metric: Metric{
+						MetricNameLabel: "test_metric",
+					},
+					Value:     123.1,
+					Timestamp: 1234567,
+				},
+				&Sample{
+					Metric: Metric{
+						"foo": "bar",
+					},
+					Value:     SampleValue(math.Inf(1)),
+					Timestamp: 1234,
+				},
+			},
+		},
+	}
+
+	for _, test := range input {
+		b, err := json.Marshal(test.value)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if string(b) != test.plain {
+			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+			continue
+		}
+
+		var vec Vector
+		err = json.Unmarshal(b, &vec)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if !reflect.DeepEqual(vec, test.value) {
+			t.Errorf("decoding error: expected %v, got %v", test.value, vec)
+		}
+	}
+}
+
+// TestScalarJSON round-trips Scalar values, including the quoted-string
+// encodings of +Inf and -Inf.
+func TestScalarJSON(t *testing.T) {
+	input := []struct {
+		plain string
+		value Scalar
+	}{
+		{
+			plain: `[123.456,"456"]`,
+			value: Scalar{
+				Timestamp: 123456,
+				Value:     456,
+			},
+		},
+		{
+			plain: `[123123.456,"+Inf"]`,
+			value: Scalar{
+				Timestamp: 123123456,
+				Value:     SampleValue(math.Inf(1)),
+			},
+		},
+		{
+			plain: `[123123.456,"-Inf"]`,
+			value: Scalar{
+				Timestamp: 123123456,
+				Value:     SampleValue(math.Inf(-1)),
+			},
+		},
+	}
+
+	for _, test := range input {
+		b, err := json.Marshal(test.value)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if string(b) != test.plain {
+			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+			continue
+		}
+
+		var sv Scalar
+		err = json.Unmarshal(b, &sv)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if sv != test.value {
+			t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+		}
+	}
+}
+
+// TestStringJSON round-trips String values, including non-ASCII UTF-8
+// content.
+func TestStringJSON(t *testing.T) {
+	input := []struct {
+		plain string
+		value String
+	}{
+		{
+			plain: `[123.456,"test"]`,
+			value: String{
+				Timestamp: 123456,
+				Value:     "test",
+			},
+		},
+		{
+			plain: `[123123.456,"台北"]`,
+			value: String{
+				Timestamp: 123123456,
+				Value:     "台北",
+			},
+		},
+	}
+
+	for _, test := range input {
+		b, err := json.Marshal(test.value)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if string(b) != test.plain {
+			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+			continue
+		}
+
+		var sv String
+		err = json.Unmarshal(b, &sv)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if sv != test.value {
+			t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+		}
+	}
+}
+
+// TestVectorSort checks that sort.Sort on a Vector orders samples by metric
+// fingerprint first, then timestamp, via Vector.Less.
+func TestVectorSort(t *testing.T) {
+	input := Vector{
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "A",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "A",
+			},
+			Timestamp: 2,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "C",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "C",
+			},
+			Timestamp: 2,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "B",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "B",
+			},
+			Timestamp: 2,
+		},
+	}
+
+	expected := Vector{
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "A",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "A",
+			},
+			Timestamp: 2,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "B",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "B",
+			},
+			Timestamp: 2,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "C",
+			},
+			Timestamp: 1,
+		},
+		&Sample{
+			Metric: Metric{
+				MetricNameLabel: "C",
+			},
+			Timestamp: 2,
+		},
+	}
+
+	sort.Sort(input)
+
+	// Metrics are compared via their fingerprints, so the expected order
+	// is by fingerprint, not by lexical label value.
+	for i, actual := range input {
+		actualFp := actual.Metric.Fingerprint()
+		expectedFp := expected[i].Metric.Fingerprint()
+
+		if actualFp != expectedFp {
+			t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String())
+		}
+
+		if actual.Timestamp != expected[i].Timestamp {
+			t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp)
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go
new file mode 100644
index 000000000..930b52d4f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/route/route.go
@@ -0,0 +1,137 @@
+package route
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "github.com/julienschmidt/httprouter"
+ "golang.org/x/net/context"
+)
+
+// ctxts maps each in-flight *http.Request to its context, guarded by mtx.
+// The out-of-band map exists because this code uses golang.org/x/net/context
+// rather than request-scoped contexts on *http.Request itself; entries are
+// installed and removed by Router.handle around each handler invocation.
+var (
+	mtx   = sync.RWMutex{}
+	ctxts = map[*http.Request]context.Context{}
+)
+
+// Context returns the context for the request.
+// It returns nil if the request is not currently being served through this
+// package's Router (the ctxts entry only exists while the handler runs).
+func Context(r *http.Request) context.Context {
+	mtx.RLock()
+	defer mtx.RUnlock()
+	return ctxts[r]
+}
+
+// param is an unexported context-key type so route parameters stored in a
+// context cannot collide with values set by other packages.
+type param string
+
+// Param returns param p for the context.
+// NOTE: the unchecked type assertion panics if p was never set on ctx.
+func Param(ctx context.Context, p string) string {
+	return ctx.Value(param(p)).(string)
+}
+
+// WithParam returns a new context with param p set to v.
+func WithParam(ctx context.Context, p, v string) context.Context {
+	return context.WithValue(ctx, param(p), v)
+}
+
+// contextFn builds the base context for an incoming request; returning a
+// non-nil error aborts the request with HTTP 400 (see handle).
+type contextFn func(r *http.Request) (context.Context, error)
+
+// Router wraps httprouter.Router and adds support for prefixed sub-routers
+// and per-request context injections.
+type Router struct {
+	rtr    *httprouter.Router // underlying router, shared with sub-routers
+	prefix string             // path prefix prepended to every registered route
+	ctxFn  contextFn          // per-request base-context constructor
+}
+
+// New returns a new Router.
+// If ctxFn is nil, a default is installed that gives every request a fresh
+// background context and never fails.
+func New(ctxFn contextFn) *Router {
+	if ctxFn == nil {
+		ctxFn = func(r *http.Request) (context.Context, error) {
+			return context.Background(), nil
+		}
+	}
+	return &Router{
+		rtr:   httprouter.New(),
+		ctxFn: ctxFn,
+	}
+}
+
+// WithPrefix returns a router that prefixes all registered routes with prefix.
+// The returned router shares the underlying httprouter and context function
+// with the receiver, and prefixes accumulate across nested calls.
+func (r *Router) WithPrefix(prefix string) *Router {
+	return &Router{rtr: r.rtr, prefix: r.prefix + prefix, ctxFn: r.ctxFn}
+}
+
+// handle turns a HandlerFunc into an httprouter.Handle.
+// It builds the request context via r.ctxFn (rejecting the request with
+// HTTP 400 on error), attaches all matched route parameters, and publishes
+// the context in the package-level ctxts map for the duration of the
+// handler call so the handler can retrieve it through Context.
+func (r *Router) handle(h http.HandlerFunc) httprouter.Handle {
+	return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
+		reqCtx, err := r.ctxFn(req)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Error creating request context: %v", err), http.StatusBadRequest)
+			return
+		}
+		// The derived context is canceled as soon as the handler returns.
+		ctx, cancel := context.WithCancel(reqCtx)
+		defer cancel()
+
+		for _, p := range params {
+			ctx = context.WithValue(ctx, param(p.Key), p.Value)
+		}
+
+		mtx.Lock()
+		ctxts[req] = ctx
+		mtx.Unlock()
+
+		h(w, req)
+
+		// The entry is removed once the handler returns, so goroutines that
+		// outlive the handler must not call Context(req).
+		mtx.Lock()
+		delete(ctxts, req)
+		mtx.Unlock()
+	}
+}
+
+// Get registers a new GET route. As with all registration methods below,
+// path is registered relative to the router's accumulated prefix and the
+// handler is wrapped by handle for context injection.
+func (r *Router) Get(path string, h http.HandlerFunc) {
+	r.rtr.GET(r.prefix+path, r.handle(h))
+}
+
+// Options registers a new OPTIONS route.
+func (r *Router) Options(path string, h http.HandlerFunc) {
+	r.rtr.OPTIONS(r.prefix+path, r.handle(h))
+}
+
+// Del registers a new DELETE route.
+func (r *Router) Del(path string, h http.HandlerFunc) {
+	r.rtr.DELETE(r.prefix+path, r.handle(h))
+}
+
+// Put registers a new PUT route.
+func (r *Router) Put(path string, h http.HandlerFunc) {
+	r.rtr.PUT(r.prefix+path, r.handle(h))
+}
+
+// Post registers a new POST route.
+func (r *Router) Post(path string, h http.HandlerFunc) {
+	r.rtr.POST(r.prefix+path, r.handle(h))
+}
+
+// Redirect takes an absolute path and sends an internal HTTP redirect for it,
+// prefixed by the router's path prefix. Note that this method does not include
+// functionality for handling relative paths or full URL redirects.
+// code should be one of the 3xx http.Status* constants.
+func (r *Router) Redirect(w http.ResponseWriter, req *http.Request, path string, code int) {
+	http.Redirect(w, req, r.prefix+path, code)
+}
+
+// ServeHTTP implements http.Handler by delegating directly to the wrapped
+// httprouter.Router; the prefix is already baked into registered routes.
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	r.rtr.ServeHTTP(w, req)
+}
+
+// FileServe returns a new http.HandlerFunc that serves files from dir.
+// Using routes must provide the *filepath parameter.
+// NOTE: Param's type assertion panics if the route lacks *filepath, and the
+// handler must be invoked through this package's Router so Context(r) is
+// non-nil.
+func FileServe(dir string) http.HandlerFunc {
+	fs := http.FileServer(http.Dir(dir))
+
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Rewrite the URL path to the wildcard match before delegating.
+		r.URL.Path = Param(Context(r), "filepath")
+		fs.ServeHTTP(w, r)
+	}
+}
diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go
new file mode 100644
index 000000000..4055d69d5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/route/route_test.go
@@ -0,0 +1,75 @@
+package route
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+// TestRedirect checks that Router.Redirect prepends the router's prefix to
+// the target path and uses the requested status code.
+func TestRedirect(t *testing.T) {
+	router := New(nil).WithPrefix("/test/prefix")
+	w := httptest.NewRecorder()
+	r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil)
+	if err != nil {
+		t.Fatalf("Error building test request: %s", err)
+	}
+
+	router.Redirect(w, r, "/some/endpoint", http.StatusFound)
+	if w.Code != http.StatusFound {
+		t.Fatalf("Unexpected redirect status code: got %d, want %d", w.Code, http.StatusFound)
+	}
+
+	// The Location header must carry the prefixed path.
+	want := "/test/prefix/some/endpoint"
+	got := w.Header()["Location"][0]
+	if want != got {
+		t.Fatalf("Unexpected redirect location: got %s, want %s", got, want)
+	}
+}
+
+// TestContextFn checks that values placed on the base context by the
+// router's contextFn are visible to handlers via Context.
+// NOTE(review): the assertion lives inside the handler, so the test passes
+// vacuously if the route never matches; the nil ResponseWriter is safe only
+// because the handler writes no response — confirm both remain true if the
+// test is modified.
+func TestContextFn(t *testing.T) {
+	router := New(func(r *http.Request) (context.Context, error) {
+		return context.WithValue(context.Background(), "testkey", "testvalue"), nil
+	})
+
+	router.Get("/test", func(w http.ResponseWriter, r *http.Request) {
+		want := "testvalue"
+		got := Context(r).Value("testkey")
+		if want != got {
+			t.Fatalf("Unexpected context value: want %q, got %q", want, got)
+		}
+	})
+
+	r, err := http.NewRequest("GET", "http://localhost:9090/test", nil)
+	if err != nil {
+		t.Fatalf("Error building test request: %s", err)
+	}
+	router.ServeHTTP(nil, r)
+}
+
+// TestContextFnError checks that an error from the contextFn short-circuits
+// the request with HTTP 400 and the expected error body, without invoking
+// the registered handler.
+func TestContextFnError(t *testing.T) {
+	router := New(func(r *http.Request) (context.Context, error) {
+		return context.Background(), fmt.Errorf("test error")
+	})
+
+	router.Get("/test", func(w http.ResponseWriter, r *http.Request) {})
+
+	r, err := http.NewRequest("GET", "http://localhost:9090/test", nil)
+	if err != nil {
+		t.Fatalf("Error building test request: %s", err)
+	}
+	w := httptest.NewRecorder()
+	router.ServeHTTP(w, r)
+
+	if w.Code != http.StatusBadRequest {
+		t.Fatalf("Unexpected response status: got %q, want %q", w.Code, http.StatusBadRequest)
+	}
+
+	// http.Error appends a trailing newline to the message.
+	want := "Error creating request context: test error\n"
+	got := w.Body.String()
+	if want != got {
+		t.Fatalf("Unexpected response body: got %q, want %q", got, want)
+	}
+}
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
new file mode 100644
index 000000000..84489a510
--- /dev/null
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -0,0 +1,89 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strings"
+ "text/template"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Build information. Populated at build-time.
+// NOTE(review): the string variables are presumably injected via the Go
+// linker's -X flag (not visible here — confirm in the build scripts);
+// GoVersion alone is determined at runtime.
+var (
+	Version   string
+	Revision  string
+	Branch    string
+	BuildUser string
+	BuildDate string
+	GoVersion = runtime.Version()
+)
+
+// NewCollector returns a collector which exports metrics about current version information.
+// The gauge is named <program>_build_info, carries the version, revision,
+// branch, and goversion labels, and is pinned to the constant value 1 so
+// the labels themselves convey the information.
+func NewCollector(program string) *prometheus.GaugeVec {
+	buildInfo := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: program,
+			Name:      "build_info",
+			Help: fmt.Sprintf(
+				"A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
+				program,
+			),
+		},
+		[]string{"version", "revision", "branch", "goversion"},
+	)
+	buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1)
+	return buildInfo
+}
+
+// versionInfoTmpl contains the template used by Info.
+// It renders the map built in Print; the leading/trailing newlines are
+// stripped by Print via strings.TrimSpace.
+var versionInfoTmpl = `
+{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
+  build user:       {{.buildUser}}
+  build date:       {{.buildDate}}
+  go version:       {{.goVersion}}
+`
+
+// Print returns version information rendered through versionInfoTmpl,
+// with surrounding whitespace trimmed.
+func Print(program string) string {
+	m := map[string]string{
+		"program": program, "version": Version, "revision": Revision,
+		"branch": Branch, "buildUser": BuildUser, "buildDate": BuildDate,
+		"goVersion": GoVersion,
+	}
+	// The template source is a package constant, so parse errors are
+	// programmer bugs; Must and the panic below enforce that invariant.
+	t := template.Must(template.New("version").Parse(versionInfoTmpl))
+
+	var buf bytes.Buffer
+	if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
+		panic(err)
+	}
+	return strings.TrimSpace(buf.String())
+}
+
+// Info returns version, branch and revision information as a single
+// parenthesized string, suitable for one-line startup logs.
+func Info() string {
+	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
+}
+
+// BuildContext returns goVersion, buildUser and buildDate information as a
+// single parenthesized string, complementing Info.
+func BuildContext() string {
+	return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
+}
diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml
new file mode 100644
index 000000000..2b4554da5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.travis.yml
@@ -0,0 +1,5 @@
+sudo: false
+language: go
+go:
+ - 1.5
+ - 1.6
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 000000000..0c802dd87
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,20 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt <ts@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Armen Baghumian <abaghumian@noggin.com.au>
+* Bjoern Rabenstein <beorn@soundcloud.com>
+* David Cournapeau <cournape@gmail.com>
+* Ji-Hoon, Seol <jihoon.seol@gmail.com>
+* Jonas Große Sundrup <cherti@letopolis.de>
+* Julius Volz <julius.volz@gmail.com>
+* Matthias Rampke <mr@soundcloud.com>
+* Nicky Gerritsen <nicky@streamone.nl>
+* Rémi Audebert <contact@halfr.net>
+* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 000000000..5705f0fbe
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 000000000..c264a49d1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,6 @@
+ci:
+ ! gofmt -l *.go | read nothing
+ go vet
+ go test -v ./...
+ go get github.com/golang/lint/golint
+ golint *.go
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 000000000..53c5e9aa1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 000000000..6e7ee6b8b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 000000000..e2acd6d40
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/cmdline b/vendor/github.com/prometheus/procfs/fixtures/26231/cmdline
new file mode 100644
index 000000000..d2d8ef887
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/cmdline
Binary files differ
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/comm b/vendor/github.com/prometheus/procfs/fixtures/26231/comm
new file mode 100644
index 000000000..f027e0d4b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/comm
@@ -0,0 +1 @@
+vim
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/exe b/vendor/github.com/prometheus/procfs/fixtures/26231/exe
new file mode 120000
index 000000000..a91bec4da
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/exe
@@ -0,0 +1 @@
+/usr/bin/vim \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0
new file mode 120000
index 000000000..da9c5dff3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0
@@ -0,0 +1 @@
+../../symlinktargets/abc \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1
new file mode 120000
index 000000000..ca47b50ca
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1
@@ -0,0 +1 @@
+../../symlinktargets/def \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10
new file mode 120000
index 000000000..c08683168
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10
@@ -0,0 +1 @@
+../../symlinktargets/xyz \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2
new file mode 120000
index 000000000..66731c068
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2
@@ -0,0 +1 @@
+../../symlinktargets/ghi \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3
new file mode 120000
index 000000000..0135dce35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3
@@ -0,0 +1 @@
+../../symlinktargets/uvw \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/io b/vendor/github.com/prometheus/procfs/fixtures/26231/io
new file mode 100644
index 000000000..b6210a7a7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/io
@@ -0,0 +1,7 @@
+rchar: 750339
+wchar: 818609
+syscr: 7405
+syscw: 5245
+read_bytes: 1024
+write_bytes: 2048
+cancelled_write_bytes: -1024
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/limits b/vendor/github.com/prometheus/procfs/fixtures/26231/limits
new file mode 100644
index 000000000..23c6b6898
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/stat b/vendor/github.com/prometheus/procfs/fixtures/26231/stat
new file mode 100644
index 000000000..438aaa9dc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26231/stat
@@ -0,0 +1 @@
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/cmdline b/vendor/github.com/prometheus/procfs/fixtures/26232/cmdline
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/cmdline
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/comm b/vendor/github.com/prometheus/procfs/fixtures/26232/comm
new file mode 100644
index 000000000..62361ca78
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/comm
@@ -0,0 +1 @@
+ata_sff
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0
new file mode 120000
index 000000000..da9c5dff3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0
@@ -0,0 +1 @@
+../../symlinktargets/abc \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1
new file mode 120000
index 000000000..ca47b50ca
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1
@@ -0,0 +1 @@
+../../symlinktargets/def \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2
new file mode 120000
index 000000000..66731c068
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2
@@ -0,0 +1 @@
+../../symlinktargets/ghi \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3
new file mode 120000
index 000000000..0135dce35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3
@@ -0,0 +1 @@
+../../symlinktargets/uvw \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4
new file mode 120000
index 000000000..c08683168
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4
@@ -0,0 +1 @@
+../../symlinktargets/xyz \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/limits b/vendor/github.com/prometheus/procfs/fixtures/26232/limits
new file mode 100644
index 000000000..3f9bf16a9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 29436 29436 processes
+Max open files 1024 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 29436 29436 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/stat b/vendor/github.com/prometheus/procfs/fixtures/26232/stat
new file mode 100644
index 000000000..321b16073
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/26232/stat
@@ -0,0 +1 @@
+33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/584/stat b/vendor/github.com/prometheus/procfs/fixtures/584/stat
new file mode 100644
index 000000000..65b9369d1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/584/stat
@@ -0,0 +1,2 @@
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
diff --git a/vendor/github.com/prometheus/procfs/fixtures/mdstat b/vendor/github.com/prometheus/procfs/fixtures/mdstat
new file mode 100644
index 000000000..4430bdee2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/mdstat
@@ -0,0 +1,26 @@
+Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
+md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
+ 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
+
+md127 : active raid1 sdi2[0] sdj2[1]
+ 312319552 blocks [2/2] [UU]
+
+md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
+ 248896 blocks [2/2] [UU]
+
+md4 : inactive raid1 sda3[0] sdb3[1]
+ 4883648 blocks [2/2] [UU]
+
+md6 : active raid1 sdb2[2] sda2[0]
+ 195310144 blocks [2/1] [U_]
+ [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md8 : active raid1 sdb1[1] sda1[0]
+ 195310144 blocks [2/2] [UU]
+ [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
+ 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
+ bitmap: 0/30 pages [0KB], 65536KB chunk
+
+unused devices: <none>
diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs b/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs
new file mode 100644
index 000000000..6a6a97d7d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs
@@ -0,0 +1,14 @@
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+ -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP C0A80016:0CEA wlc
+ -> C0A85216:0CEA Tunnel 100 248 2
+ -> C0A85318:0CEA Tunnel 100 248 2
+ -> C0A85315:0CEA Tunnel 100 248 1
+TCP C0A80039:0CEA wlc
+ -> C0A85416:0CEA Tunnel 0 0 0
+ -> C0A85215:0CEA Tunnel 100 1499 0
+ -> C0A83215:0CEA Tunnel 100 1498 0
+TCP C0A80037:0CEA wlc
+ -> C0A8321A:0CEA Tunnel 0 0 0
+ -> C0A83120:0CEA Tunnel 100 0 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats b/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats
new file mode 100644
index 000000000..c00724e0f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/net/ip_vs_stats
@@ -0,0 +1,6 @@
+ Total Incoming Outgoing Incoming Outgoing
+ Conns Packets Packets Bytes Bytes
+ 16AA370 E33656E5 0 51D8C8883AB3 0
+
+ Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
+ 4 1FB3C 0 1282A8F 0
diff --git a/vendor/github.com/prometheus/procfs/fixtures/self b/vendor/github.com/prometheus/procfs/fixtures/self
new file mode 120000
index 000000000..1eeedea3d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/self
@@ -0,0 +1 @@
+26231 \ No newline at end of file
diff --git a/vendor/github.com/prometheus/procfs/fixtures/stat b/vendor/github.com/prometheus/procfs/fixtures/stat
new file mode 100644
index 000000000..dabb96f74
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/stat
@@ -0,0 +1,16 @@
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 0
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README
new file mode 100644
index 000000000..5cf184ea0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/README
@@ -0,0 +1,2 @@
+This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
+They are otherwise ignored by the tests
diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/abc b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/abc
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/abc
diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/def b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/def
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/def
diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/ghi b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/ghi
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/ghi
diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/uvw b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/uvw
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/uvw
diff --git a/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/xyz b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/xyz
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures/symlinktargets/xyz
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 000000000..49aaab050
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,33 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_test.go b/vendor/github.com/prometheus/procfs/fs_test.go
new file mode 100644
index 000000000..91f1c6c97
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_test.go
@@ -0,0 +1,13 @@
+package procfs
+
+import "testing"
+
+func TestNewFS(t *testing.T) {
+ if _, err := NewFS("foobar"); err == nil {
+ t.Error("want NewFS to fail for non-existing mount point")
+ }
+
+ if _, err := NewFS("procfs.go"); err == nil {
+ t.Error("want NewFS to fail if mount point is not a directory")
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..e7012f732
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+ // Total incoming packages processed.
+ IncomingPackets uint64
+ // Total outgoing packages processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(string(scanner.Text()))
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs_test.go b/vendor/github.com/prometheus/procfs/ipvs_test.go
new file mode 100644
index 000000000..c836c23ac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs_test.go
@@ -0,0 +1,190 @@
+package procfs
+
+import (
+ "net"
+ "testing"
+)
+
+var (
+ expectedIPVSStats = IPVSStats{
+ Connections: 23765872,
+ IncomingPackets: 3811989221,
+ OutgoingPackets: 0,
+ IncomingBytes: 89991519156915,
+ OutgoingBytes: 0,
+ }
+ expectedIPVSBackendStatuses = []IPVSBackendStatus{
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.22"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.82.22"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 248,
+ InactConn: 2,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.22"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.83.24"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 248,
+ InactConn: 2,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.22"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.83.21"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 248,
+ InactConn: 1,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.57"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.84.22"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 0,
+ ActiveConn: 0,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.57"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.82.21"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 1499,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.57"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.50.21"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 1498,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.55"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.50.26"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 0,
+ ActiveConn: 0,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.55"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.49.32"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 0,
+ InactConn: 0,
+ },
+ }
+)
+
+func TestIPVSStats(t *testing.T) {
+ stats, err := FS("fixtures").NewIPVSStats()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if stats != expectedIPVSStats {
+ t.Errorf("want %+v, have %+v", expectedIPVSStats, stats)
+ }
+}
+
+func TestParseIPPort(t *testing.T) {
+ ip := net.ParseIP("192.168.0.22")
+ port := uint16(3306)
+
+ gotIP, gotPort, err := parseIPPort("C0A80016:0CEA")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(gotIP.Equal(ip) && port == gotPort) {
+ t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort)
+ }
+}
+
+func TestParseIPPortInvalid(t *testing.T) {
+ testcases := []string{
+ "",
+ "C0A80016",
+ "C0A800:1234",
+ "FOOBARBA:1234",
+ "C0A80016:0CEA:1234",
+ }
+
+ for _, s := range testcases {
+ ip, port, err := parseIPPort(s)
+ if ip != nil || port != uint16(0) || err == nil {
+ t.Errorf("Expected error for input %s, have ip = %s, port = %v, err = %v", s, ip, port, err)
+ }
+ }
+}
+
+func TestParseIPPortIPv6(t *testing.T) {
+ ip := net.ParseIP("dead:beef::1")
+ port := uint16(8080)
+
+ gotIP, gotPort, err := parseIPPort("DEADBEEF000000000000000000000001:1F90")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(gotIP.Equal(ip) && port == gotPort) {
+ t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort)
+ }
+
+}
+
+func TestIPVSBackendStatus(t *testing.T) {
+ backendStats, err := FS("fixtures").NewIPVSBackendStatus()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := len(expectedIPVSBackendStatuses), len(backendStats); want != have {
+ t.Fatalf("want %d backend statuses, have %d", want, have)
+ }
+
+ for idx, expect := range expectedIPVSBackendStatuses {
+ if !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) {
+ t.Errorf("want LocalAddress %s, have %s", expect.LocalAddress, backendStats[idx].LocalAddress)
+ }
+ if backendStats[idx].LocalPort != expect.LocalPort {
+ t.Errorf("want LocalPort %d, have %d", expect.LocalPort, backendStats[idx].LocalPort)
+ }
+ if !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) {
+ t.Errorf("want RemoteAddress %s, have %s", expect.RemoteAddress, backendStats[idx].RemoteAddress)
+ }
+ if backendStats[idx].RemotePort != expect.RemotePort {
+ t.Errorf("want RemotePort %d, have %d", expect.RemotePort, backendStats[idx].RemotePort)
+ }
+ if backendStats[idx].Proto != expect.Proto {
+ t.Errorf("want Proto %s, have %s", expect.Proto, backendStats[idx].Proto)
+ }
+ if backendStats[idx].Weight != expect.Weight {
+ t.Errorf("want Weight %d, have %d", expect.Weight, backendStats[idx].Weight)
+ }
+ if backendStats[idx].ActiveConn != expect.ActiveConn {
+ t.Errorf("want ActiveConn %d, have %d", expect.ActiveConn, backendStats[idx].ActiveConn)
+ }
+ if backendStats[idx].InactConn != expect.InactConn {
+ t.Errorf("want InactConn %d, have %d", expect.InactConn, backendStats[idx].InactConn)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 000000000..d7a248c0d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // activity-state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ // j is the line number of the syncing-line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+ // If device is syncing at the moment, get the number of currently
+ // synced bytes, otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/mdstat_test.go b/vendor/github.com/prometheus/procfs/mdstat_test.go
new file mode 100644
index 000000000..ca5fe4d1b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat_test.go
@@ -0,0 +1,31 @@
+package procfs
+
+import (
+ "testing"
+)
+
+func TestMDStat(t *testing.T) {
+ mdStates, err := FS("fixtures").ParseMDStat()
+ if err != nil {
+ t.Fatalf("parsing of reference-file failed entirely: %s", err)
+ }
+
+ refs := map[string]MDStat{
+ "md3": MDStat{"md3", "active", 8, 8, 5853468288, 5853468288},
+ "md127": MDStat{"md127", "active", 2, 2, 312319552, 312319552},
+ "md0": MDStat{"md0", "active", 2, 2, 248896, 248896},
+ "md4": MDStat{"md4", "inactive", 2, 2, 4883648, 4883648},
+ "md6": MDStat{"md6", "active", 1, 2, 195310144, 16775552},
+ "md8": MDStat{"md8", "active", 2, 2, 195310144, 16775552},
+ "md7": MDStat{"md7", "active", 3, 4, 7813735424, 7813735424},
+ }
+
+ if want, have := len(refs), len(mdStates); want != have {
+ t.Errorf("want %d parsed md-devices, have %d", want, have)
+ }
+ for _, md := range mdStates {
+ if want, have := refs[md.Name], md; want != have {
+ t.Errorf("%s: want %v, have %v", md.Name, want, have)
+ }
+ }
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 000000000..0d0a6a90f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,212 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..b4e31d7ba
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/<pid>/io. Field order matches the
+// rchar/wchar/syscr/syscw/read_bytes/write_bytes/cancelled_write_bytes
+// lines of that file (see NewIO).
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation. Signed, since truncation can make it negative.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io_test.go b/vendor/github.com/prometheus/procfs/proc_io_test.go
new file mode 100644
index 000000000..3aa1a1293
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io_test.go
@@ -0,0 +1,33 @@
+package procfs
+
+import "testing"
+
+// TestProcIO checks that the fixture's /proc/26231/io values are parsed
+// into the expected ProcIO fields. Unsigned counters are compared through
+// int64 so one table covers the signed CancelledWriteBytes (-1024) too.
+func TestProcIO(t *testing.T) {
+	p, err := FS("fixtures").NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s, err := p.NewIO()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, test := range []struct {
+		name string
+		want int64
+		have int64
+	}{
+		{name: "RChar", want: 750339, have: int64(s.RChar)},
+		{name: "WChar", want: 818609, have: int64(s.WChar)},
+		{name: "SyscR", want: 7405, have: int64(s.SyscR)},
+		{name: "SyscW", want: 5245, have: int64(s.SyscW)},
+		{name: "ReadBytes", want: 1024, have: int64(s.ReadBytes)},
+		{name: "WriteBytes", want: 2048, have: int64(s.WriteBytes)},
+		{name: "CancelledWriteBytes", want: -1024, have: s.CancelledWriteBytes},
+	} {
+		if test.want != test.have {
+			t.Errorf("want %s %d, have %d", test.name, test.want, test.have)
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 000000000..2df997ce1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+//
+// A value of -1 in any field means the corresponding resource is unlimited
+// (see parseInt).
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime int
+	// Maximum size of files that the process may create.
+	FileSize int
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize int
+	// Maximum size of the process stack in bytes.
+	StackSize int
+	// Maximum size of a core file.
+	CoreFileSize int
+	// Limit of the process's resident set in pages.
+	ResidentSet int
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes int
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles int
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory int
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace int
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks int
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals int
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize int
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority int
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+	RealtimePriority int
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
+	RealtimeTimeout int
+}
+
+// limitsFields is the number of columns a limits line is split into (the
+// limit name, its soft limit, and the remainder); limitsUnlimited is the
+// kernel's literal for an unbounded limit.
+const (
+	limitsFields    = 3
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	// limitsDelimiter separates the columns of a limits line.
+	limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process, parsed from
+// /proc/<pid>/limits. Lines with unrecognized limit names are skipped;
+// a malformed line or unparsable value aborts with an error.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		// Split into at most limitsFields columns; fields[0] is the limit
+		// name and fields[1] its soft limit.
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		// Each case assigns into the shared err, which is checked once
+		// after the switch; do not reorder or shadow it.
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	// s.Err() surfaces any scanner I/O error encountered mid-file.
+	return l, s.Err()
+}
+
+// parseInt converts a single limits token to an int. The kernel's literal
+// "unlimited" maps to -1; any other token must be a decimal integer.
+func parseInt(s string) (int, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	// Parse at the platform int width (strconv.IntSize) instead of a fixed
+	// 32 bits: byte-valued limits such as "Max address space" or "Max file
+	// size" routinely exceed 2^31-1 on 64-bit systems and would otherwise
+	// fail to parse, aborting NewLimits entirely.
+	i, err := strconv.ParseInt(s, 10, strconv.IntSize)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return int(i), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits_test.go b/vendor/github.com/prometheus/procfs/proc_limits_test.go
new file mode 100644
index 000000000..70bf04ec2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits_test.go
@@ -0,0 +1,31 @@
+package procfs
+
+import "testing"
+
+// TestNewLimits spot-checks a few parsed limits from the fixture's
+// /proc/26231/limits, including the -1 mapping for "unlimited" values.
+func TestNewLimits(t *testing.T) {
+	p, err := FS("fixtures").NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	l, err := p.NewLimits()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, test := range []struct {
+		name string
+		want int
+		have int
+	}{
+		{name: "cpu time", want: -1, have: l.CPUTime},
+		{name: "open files", want: 2048, have: l.OpenFiles},
+		{name: "msgqueue size", want: 819200, have: l.MsqqueueSize},
+		{name: "nice priority", want: 0, have: l.NicePriority},
+		{name: "address space", want: -1, have: l.AddressSpace},
+	} {
+		if test.want != test.have {
+			t.Errorf("want %s %d, have %d", test.name, test.want, test.have)
+		}
+	}
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 000000000..724e271b9
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+	// The process ID.
+	PID int
+	// The filename of the executable.
+	Comm string
+	// The process state.
+	State string
+	// The PID of the parent of this process.
+	PPID int
+	// The process group ID of the process.
+	PGRP int
+	// The session ID of the process.
+	Session int
+	// The controlling terminal of the process.
+	TTY int
+	// The ID of the foreground process group of the controlling terminal of
+	// the process.
+	TPGID int
+	// The kernel flags word of the process.
+	Flags uint
+	// The number of minor faults the process has made which have not required
+	// loading a memory page from disk.
+	MinFlt uint
+	// The number of minor faults that the process's waited-for children have
+	// made.
+	CMinFlt uint
+	// The number of major faults the process has made which have required
+	// loading a memory page from disk.
+	MajFlt uint
+	// The number of major faults that the process's waited-for children have
+	// made.
+	CMajFlt uint
+	// Amount of time that this process has been scheduled in user mode,
+	// measured in clock ticks.
+	UTime uint
+	// Amount of time that this process has been scheduled in kernel mode,
+	// measured in clock ticks.
+	STime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in user mode, measured in clock ticks.
+	CUTime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in kernel mode, measured in clock ticks.
+	CSTime uint
+	// For processes running a real-time scheduling policy, this is the negated
+	// scheduling priority, minus one.
+	Priority int
+	// The nice value, a value in the range 19 (low priority) to -20 (high
+	// priority).
+	Nice int
+	// Number of threads in this process.
+	NumThreads int
+	// The time the process started after system boot, the value is expressed
+	// in clock ticks.
+	Starttime uint64
+	// Virtual memory size in bytes.
+	VSize int
+	// Resident set size in pages.
+	RSS int
+
+	// fs is the procfs mount point this stat was read from; kept so that
+	// StartTime can resolve the system boot time from the same mount.
+	fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+	f, err := os.Open(p.path("stat"))
+	if err != nil {
+		return ProcStat{}, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	var (
+		ignore int
+
+		s = ProcStat{PID: p.PID, fs: p.fs}
+		l = bytes.Index(data, []byte("("))
+		r = bytes.LastIndex(data, []byte(")"))
+	)
+
+	if l < 0 || r < 0 {
+		return ProcStat{}, fmt.Errorf(
+			"unexpected format, couldn't extract comm: %s",
+			data,
+		)
+	}
+
+	// comm is wrapped in parentheses and may itself contain spaces and
+	// parentheses, so it is taken between the first "(" and the LAST ")"
+	// rather than found by whitespace splitting.
+	s.Comm = string(data[l+1 : r])
+	// data[r+2:] skips the ") " after comm; the remaining fields are
+	// whitespace-separated and scanned in the order documented on ProcStat.
+	_, err = fmt.Fscan(
+		bytes.NewBuffer(data[r+2:]),
+		&s.State,
+		&s.PPID,
+		&s.PGRP,
+		&s.Session,
+		&s.TTY,
+		&s.TPGID,
+		&s.Flags,
+		&s.MinFlt,
+		&s.CMinFlt,
+		&s.MajFlt,
+		&s.CMajFlt,
+		&s.UTime,
+		&s.STime,
+		&s.CUTime,
+		&s.CSTime,
+		&s.Priority,
+		&s.Nice,
+		&s.NumThreads,
+		&ignore, // deliberately discarded field (itrealvalue per proc(5))
+		&s.Starttime,
+		&s.VSize,
+		&s.RSS,
+	)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes (the VSize field
+// parsed from /proc/[pid]/stat).
+func (s ProcStat) VirtualMemory() int {
+	return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes. RSS is reported
+// in pages, so it is scaled by the system page size.
+func (s ProcStat) ResidentMemory() int {
+	return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process in seconds. It adds
+// the process's start offset (clock ticks since boot, converted via USER_HZ)
+// to the system boot time read from /proc/stat.
+func (s ProcStat) StartTime() (float64, error) {
+	stat, err := s.fs.NewStat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds, i.e.
+// (utime + stime) converted from clock ticks via USER_HZ.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat_test.go b/vendor/github.com/prometheus/procfs/proc_stat_test.go
new file mode 100644
index 000000000..a2ebcde78
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat_test.go
@@ -0,0 +1,110 @@
+package procfs
+
+import (
+ "os"
+ "testing"
+)
+
+// TestProcStat spot-checks numeric fields parsed from the fixture's
+// /proc/26231/stat.
+func TestProcStat(t *testing.T) {
+	p, err := FS("fixtures").NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s, err := p.NewStat()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, test := range []struct {
+		name string
+		want int
+		have int
+	}{
+		{name: "pid", want: 26231, have: s.PID},
+		{name: "user time", want: 1677, have: int(s.UTime)},
+		{name: "system time", want: 44, have: int(s.STime)},
+		{name: "start time", want: 82375, have: int(s.Starttime)},
+		{name: "virtual memory size", want: 56274944, have: s.VSize},
+		{name: "resident set size", want: 1981, have: s.RSS},
+	} {
+		if test.want != test.have {
+			t.Errorf("want %s %d, have %d", test.name, test.want, test.have)
+		}
+	}
+}
+
+// TestProcStatComm covers comm extraction, including the tricky fixture
+// (PID 584) whose comm contains spaces and parentheses.
+func TestProcStatComm(t *testing.T) {
+	s1, err := testProcStat(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, have := "vim", s1.Comm; want != have {
+		t.Errorf("want comm %s, have %s", want, have)
+	}
+
+	s2, err := testProcStat(584)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, have := "(a b ) ( c d) ", s2.Comm; want != have {
+		t.Errorf("want comm %s, have %s", want, have)
+	}
+}
+
+// TestProcStatVirtualMemory checks the VirtualMemory accessor.
+func TestProcStatVirtualMemory(t *testing.T) {
+	s, err := testProcStat(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, have := 56274944, s.VirtualMemory(); want != have {
+		t.Errorf("want virtual memory %d, have %d", want, have)
+	}
+}
+
+// TestProcStatResidentMemory checks the pages-to-bytes conversion done by
+// ResidentMemory, using the host's page size.
+func TestProcStatResidentMemory(t *testing.T) {
+	s, err := testProcStat(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, have := 1981*os.Getpagesize(), s.ResidentMemory(); want != have {
+		t.Errorf("want resident memory %d, have %d", want, have)
+	}
+}
+
+// TestProcStatStartTime checks StartTime against the fixture boot time
+// plus the starttime offset (82375 ticks / 100 Hz = 823.75 s).
+func TestProcStatStartTime(t *testing.T) {
+	s, err := testProcStat(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	time, err := s.StartTime()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, have := 1418184099.75, time; want != have {
+		t.Errorf("want start time %f, have %f", want, have)
+	}
+}
+
+// TestProcStatCPUTime checks the tick-to-second conversion in CPUTime
+// ((1677 + 44) / 100 = 17.21).
+func TestProcStatCPUTime(t *testing.T) {
+	s, err := testProcStat(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, have := 17.21, s.CPUTime(); want != have {
+		t.Errorf("want cpu time %f, have %f", want, have)
+	}
+}
+
+// testProcStat is a helper that parses the stat fixture of the given PID.
+func testProcStat(pid int) (ProcStat, error) {
+	p, err := FS("fixtures").NewProc(pid)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	return p.NewStat()
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_test.go b/vendor/github.com/prometheus/procfs/proc_test.go
new file mode 100644
index 000000000..104b3245d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_test.go
@@ -0,0 +1,160 @@
+package procfs
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+)
+
+// TestSelf verifies that resolving the fixture's "self" link yields the
+// same Proc as opening PID 26231 directly.
+func TestSelf(t *testing.T) {
+	fs := FS("fixtures")
+
+	p1, err := fs.NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+	p2, err := fs.Self()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(p1, p2) {
+		t.Errorf("want process %v, have %v", p1, p2)
+	}
+}
+
+// TestAllProcs checks that AllProcs enumerates exactly the PID directories
+// present in the fixtures (sorted ascending before comparison).
+func TestAllProcs(t *testing.T) {
+	procs, err := FS("fixtures").AllProcs()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sort.Sort(procs)
+	for i, p := range []*Proc{{PID: 584}, {PID: 26231}} {
+		if want, have := p.PID, procs[i].PID; want != have {
+			t.Errorf("want processes %d, have %d", want, have)
+		}
+	}
+}
+
+// TestCmdLine covers both a populated cmdline and an empty one (26232),
+// which must yield an empty (non-nil) slice.
+func TestCmdLine(t *testing.T) {
+	for _, tt := range []struct {
+		process int
+		want    []string
+	}{
+		{process: 26231, want: []string{"vim", "test.go", "+10"}},
+		{process: 26232, want: []string{}},
+	} {
+		p1, err := FS("fixtures").NewProc(tt.process)
+		if err != nil {
+			t.Fatal(err)
+		}
+		c1, err := p1.CmdLine()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(tt.want, c1) {
+			t.Errorf("want cmdline %v, have %v", tt.want, c1)
+		}
+	}
+}
+
+// TestComm checks Comm for two fixture processes.
+func TestComm(t *testing.T) {
+	for _, tt := range []struct {
+		process int
+		want    string
+	}{
+		{process: 26231, want: "vim"},
+		{process: 26232, want: "ata_sff"},
+	} {
+		p1, err := FS("fixtures").NewProc(tt.process)
+		if err != nil {
+			t.Fatal(err)
+		}
+		c1, err := p1.Comm()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(tt.want, c1) {
+			t.Errorf("want comm %v, have %v", tt.want, c1)
+		}
+	}
+}
+
+// TestExecutable covers both a resolvable exe link and a missing one
+// (26232), which must return the empty string without an error.
+func TestExecutable(t *testing.T) {
+	for _, tt := range []struct {
+		process int
+		want    string
+	}{
+		{process: 26231, want: "/usr/bin/vim"},
+		{process: 26232, want: ""},
+	} {
+		p, err := FS("fixtures").NewProc(tt.process)
+		if err != nil {
+			t.Fatal(err)
+		}
+		exe, err := p.Executable()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(tt.want, exe) {
+			t.Errorf("want absolute path to cmdline %v, have %v", tt.want, exe)
+		}
+	}
+}
+
+// TestFileDescriptors checks the parsed fd numbers against the fixture's
+// fd directory entries.
+func TestFileDescriptors(t *testing.T) {
+	p1, err := FS("fixtures").NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fds, err := p1.FileDescriptors()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sort.Sort(byUintptr(fds))
+	if want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) {
+		t.Errorf("want fds %v, have %v", want, fds)
+	}
+}
+
+// TestFileDescriptorTargets checks the resolved symlink targets of the
+// fixture's fd entries.
+func TestFileDescriptorTargets(t *testing.T) {
+	p1, err := FS("fixtures").NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fds, err := p1.FileDescriptorTargets()
+	if err != nil {
+		t.Fatal(err)
+	}
+	sort.Strings(fds)
+	var want = []string{
+		"../../symlinktargets/abc",
+		"../../symlinktargets/def",
+		"../../symlinktargets/ghi",
+		"../../symlinktargets/uvw",
+		"../../symlinktargets/xyz",
+	}
+	if !reflect.DeepEqual(want, fds) {
+		t.Errorf("want fds %v, have %v", want, fds)
+	}
+}
+
+// TestFileDescriptorsLen checks the fd count against the fixture.
+func TestFileDescriptorsLen(t *testing.T) {
+	p1, err := FS("fixtures").NewProc(26231)
+	if err != nil {
+		t.Fatal(err)
+	}
+	l, err := p1.FileDescriptorsLen()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, have := 5, l; want != have {
+		t.Errorf("want fds %d, have %d", want, have)
+	}
+}
+
+// byUintptr implements sort.Interface so fd slices can be ordered
+// deterministically before comparison.
+type byUintptr []uintptr
+
+func (a byUintptr) Len() int           { return len(a) }
+func (a byUintptr) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] }
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 000000000..1ca217e8c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,56 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch, parsed from the "btime" line
+	// of /proc/stat (see NewStat).
+	BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+//
+// It is a convenience wrapper around FS.NewStat using the default procfs
+// mount point.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+
+	return fs.NewStat()
+}
+
+// NewStat returns information about current kernel/system statistics.
+//
+// Only the "btime" line (boot time, seconds since the Unix epoch) of the
+// mount's stat file is parsed; all other lines are skipped. A missing
+// btime line, a malformed line, or a scanner error is reported as an error.
+func (fs FS) NewStat() (Stat, error) {
+	f, err := os.Open(fs.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		line := s.Text()
+		if !strings.HasPrefix(line, "btime") {
+			continue
+		}
+		fields := strings.Fields(line)
+		if len(fields) != 2 {
+			return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+		}
+		// Parse as a full 64-bit integer: BootTime is an int64 and a
+		// 32-bit parse would start failing for boot timestamps after
+		// January 2038.
+		i, err := strconv.ParseInt(fields[1], 10, 64)
+		if err != nil {
+			return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+		}
+		return Stat{BootTime: i}, nil
+	}
+	if err := s.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
diff --git a/vendor/github.com/prometheus/procfs/stat_test.go b/vendor/github.com/prometheus/procfs/stat_test.go
new file mode 100644
index 000000000..6eb792478
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat_test.go
@@ -0,0 +1,14 @@
+package procfs
+
+import "testing"
+
+// TestStat checks that the btime value from the fixture's stat file is
+// parsed into BootTime.
+func TestStat(t *testing.T) {
+	s, err := FS("fixtures").NewStat()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, have := int64(1418183276), s.BootTime; want != have {
+		t.Errorf("want boot time %d, have %d", want, have)
+	}
+}