Diffstat (limited to 'vendor/github.com/prometheus/client_golang/prometheus/promhttp')
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go              |   6
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go          |   2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go                   | 129
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go              | 119
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go |  26
5 files changed, 267 insertions(+), 15 deletions(-)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 5ee095b09..9c1c66dcc 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -102,10 +102,10 @@ func init() {
return d
}
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
- return closeNotifierDelegator{d}
+ return &closeNotifierDelegator{d}
}
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
- return flusherDelegator{d}
+ return &flusherDelegator{d}
}
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
return struct {
@@ -115,7 +115,7 @@ func init() {
}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
- return hijackerDelegator{d}
+ return &hijackerDelegator{d}
}
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
return struct {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
index f4d386f7a..75a905e2f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -28,7 +28,7 @@ func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
func init() {
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
- return pusherDelegator{d}
+ return &pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 2d67f2496..8dc260355 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -39,6 +39,7 @@ import (
"net/http"
"strings"
"sync"
+ "time"
"github.com/prometheus/common/expfmt"
@@ -67,21 +68,51 @@ func giveBuf(buf *bytes.Buffer) {
bufPool.Put(buf)
}
-// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
-// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
-// error, no error logging, and compression if requested by the client.
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
//
-// If you want to create a Handler for the DefaultGatherer with different
-// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
-// your desired HandlerOpts.
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
func Handler() http.Handler {
- return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+ return InstrumentMetricHandler(
+ prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+ )
}
-// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// of the Handler is defined by the provided HandlerOpts.
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ var inFlightSem chan struct{}
+ if opts.MaxRequestsInFlight > 0 {
+ inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+ }
+
+ h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if inFlightSem != nil {
+ select {
+ case inFlightSem <- struct{}{}: // All good, carry on.
+ defer func() { <-inFlightSem }()
+ default:
+ http.Error(w, fmt.Sprintf(
+ "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+ ), http.StatusServiceUnavailable)
+ return
+ }
+ }
+
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
@@ -137,9 +168,70 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
- w.Write(buf.Bytes())
+ if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error while sending encoded metrics:", err)
+ }
// TODO(beorn7): Consider streaming serving of metrics.
})
+
+ if opts.Timeout <= 0 {
+ return h
+ }
+ return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+ "Exceeded configured timeout of %v.\n",
+ opts.Timeout,
+ ))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+ cnt := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_requests_total",
+ Help: "Total number of scrapes by HTTP status code.",
+ },
+ []string{"code"},
+ )
+ // Initialize the most likely HTTP status codes.
+ cnt.WithLabelValues("200")
+ cnt.WithLabelValues("500")
+ cnt.WithLabelValues("503")
+ if err := reg.Register(cnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ cnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ gge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "promhttp_metric_handler_requests_in_flight",
+ Help: "Current number of scrapes being served.",
+ })
+ if err := reg.Register(gge); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ gge = are.ExistingCollector.(prometheus.Gauge)
+ } else {
+ panic(err)
+ }
+ }
+
+ return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}
// HandlerErrorHandling defines how a Handler serving metrics will handle
@@ -183,6 +275,21 @@ type HandlerOpts struct {
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
+ // The number of concurrent HTTP requests is limited to
+ // MaxRequestsInFlight. Additional requests are responded to with 503
+ // Service Unavailable and a suitable message in the body. If
+ // MaxRequestsInFlight is 0 or negative, no limit is applied.
+ MaxRequestsInFlight int
+ // If handling a request takes longer than Timeout, it is responded to
+ // with 503 Service Unavailable and a suitable message. No timeout is
+ // applied if Timeout is 0 or negative. Note that with the current
+ // implementation, reaching the timeout simply ends the HTTP request as
+ // described above (and even that only if sending of the body hasn't
+ // started yet), while the bulk work of gathering all the metrics keeps
+ // running in the background (with the eventual result to be thrown
+ // away). Until the implementation is improved, it is recommended to
+ // implement a separate timeout in potentially slow Collectors.
+ Timeout time.Duration
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
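
Taken together, the new HandlerOpts fields and InstrumentMetricHandler could be wired up roughly as below. This is a minimal sketch, not part of the diff; the registry, limits, and listen address are illustrative.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry; prometheus.DefaultGatherer/DefaultRegisterer
	// would work the same way (and are what promhttp.Handler() uses).
	reg := prometheus.NewRegistry()

	// HandlerFor with the new options: at most 3 concurrent scrapes and a
	// 10s bound on serving a single scrape. InstrumentMetricHandler adds
	// the scrape counter and in-flight gauge described above.
	h := promhttp.InstrumentMetricHandler(reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
	}))

	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}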
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
index 413ff7baa..aeaa0b4d7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
@@ -19,7 +19,9 @@ import (
"log"
"net/http"
"net/http/httptest"
+ "strings"
"testing"
+ "time"
"github.com/prometheus/client_golang/prometheus"
)
@@ -37,6 +39,23 @@ func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
)
}
+type blockingCollector struct {
+ CollectStarted, Block chan struct{}
+}
+
+func (b blockingCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- prometheus.NewDesc("dummy_desc", "not helpful", nil, nil)
+}
+
+func (b blockingCollector) Collect(ch chan<- prometheus.Metric) {
+ select {
+ case b.CollectStarted <- struct{}{}:
+ default:
+ }
+ // Collects nothing, just waits for a channel receive.
+ <-b.Block
+}
+
func TestHandlerErrorHandling(t *testing.T) {
// Create a registry that collects a MetricFamily with two elements,
@@ -129,3 +148,103 @@ the_count 0
}()
panicHandler.ServeHTTP(writer, request)
}
+
+func TestInstrumentMetricHandler(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ handler := InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{}))
+ // Do it again to test idempotency.
+ InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{}))
+ writer := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/", nil)
+ request.Header.Add("Accept", "test/plain")
+
+ handler.ServeHTTP(writer, request)
+ if got, want := writer.Code, http.StatusOK; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+
+ want := "promhttp_metric_handler_requests_in_flight 1\n"
+ if got := writer.Body.String(); !strings.Contains(got, want) {
+ t.Errorf("got body %q, does not contain %q", got, want)
+ }
+ want = "promhttp_metric_handler_requests_total{code=\"200\"} 0\n"
+ if got := writer.Body.String(); !strings.Contains(got, want) {
+ t.Errorf("got body %q, does not contain %q", got, want)
+ }
+
+ writer.Body.Reset()
+ handler.ServeHTTP(writer, request)
+ if got, want := writer.Code, http.StatusOK; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+
+ want = "promhttp_metric_handler_requests_in_flight 1\n"
+ if got := writer.Body.String(); !strings.Contains(got, want) {
+ t.Errorf("got body %q, does not contain %q", got, want)
+ }
+ want = "promhttp_metric_handler_requests_total{code=\"200\"} 1\n"
+ if got := writer.Body.String(); !strings.Contains(got, want) {
+ t.Errorf("got body %q, does not contain %q", got, want)
+ }
+}
+
+func TestHandlerMaxRequestsInFlight(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ handler := HandlerFor(reg, HandlerOpts{MaxRequestsInFlight: 1})
+ w1 := httptest.NewRecorder()
+ w2 := httptest.NewRecorder()
+ w3 := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/", nil)
+ request.Header.Add("Accept", "test/plain")
+
+ c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}
+ reg.MustRegister(c)
+
+ rq1Done := make(chan struct{})
+ go func() {
+ handler.ServeHTTP(w1, request)
+ close(rq1Done)
+ }()
+ <-c.CollectStarted
+
+ handler.ServeHTTP(w2, request)
+
+ if got, want := w2.Code, http.StatusServiceUnavailable; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+ if got, want := w2.Body.String(), "Limit of concurrent requests reached (1), try again later.\n"; got != want {
+ t.Errorf("got body %q, want %q", got, want)
+ }
+
+ close(c.Block)
+ <-rq1Done
+
+ handler.ServeHTTP(w3, request)
+
+ if got, want := w3.Code, http.StatusOK; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+}
+
+func TestHandlerTimeout(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond})
+ w := httptest.NewRecorder()
+
+ request, _ := http.NewRequest("GET", "/", nil)
+ request.Header.Add("Accept", "test/plain")
+
+ c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}
+ reg.MustRegister(c)
+
+ handler.ServeHTTP(w, request)
+
+ if got, want := w.Code, http.StatusServiceUnavailable; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+ if got, want := w.Body.String(), "Exceeded configured timeout of 1ms.\n"; got != want {
+ t.Errorf("got body %q, want %q", got, want)
+ }
+
+ close(c.Block) // To not leak a goroutine.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go
index e9af63e04..716c6f45e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go
@@ -281,6 +281,16 @@ func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) {
return 0, nil
}
+// testFlusher is an http.ResponseWriter that also implements http.Flusher.
+type testFlusher struct {
+ flushCalled bool
+}
+
+func (t *testFlusher) Header() http.Header { return nil }
+func (t *testFlusher) Write([]byte) (int, error) { return 0, nil }
+func (t *testFlusher) WriteHeader(int) {}
+func (t *testFlusher) Flush() { t.flushCalled = true }
+
func TestInterfaceUpgrade(t *testing.T) {
w := &testResponseWriter{}
d := newDelegator(w, nil)
@@ -299,6 +309,22 @@ func TestInterfaceUpgrade(t *testing.T) {
if _, ok := d.(http.Hijacker); ok {
t.Error("delegator unexpectedly implements http.Hijacker")
}
+
+ f := &testFlusher{}
+ d = newDelegator(f, nil)
+ if _, ok := d.(http.CloseNotifier); ok {
+ t.Error("delegator unexpectedly implements http.CloseNotifier")
+ }
+ d.(http.Flusher).Flush()
+ if !f.flushCalled {
+ t.Error("Flush not called")
+ }
+ if _, ok := d.(io.ReaderFrom); ok {
+ t.Error("delegator unexpectedly implements io.ReaderFrom")
+ }
+ if _, ok := d.(http.Hijacker); ok {
+ t.Error("delegator unexpectedly implements http.Hijacker")
+ }
}
func ExampleInstrumentHandlerDuration() {
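
For completeness, a sketch of what the interface-upgrade behaviour tested above means in practice: a handler wrapped by the promhttp middleware can still type-assert optional interfaces such as http.Flusher on the ResponseWriter it receives, because the delegator only exposes what the underlying writer supports. The handler and metric names below are illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	cnt := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Illustrative count of handled requests by status code.",
		},
		[]string{"code"},
	)
	prometheus.MustRegister(cnt)

	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
		// w is the promhttp delegator; it implements http.Flusher only if
		// the server's underlying ResponseWriter does.
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	})

	http.Handle("/", promhttp.InstrumentHandlerCounter(cnt, inner))
	log.Fatal(http.ListenAndServe(":8080", nil))
}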