Commit e30c53da authored by Ben Kochie

Update Prometheus vendoring

Update client_golang and common vendoring to the latest release:
* Improves text rendering performance.
* Pins to the v0.9.x release.
parent 8ad495d6
...@@ -40,7 +40,8 @@ type Collector interface { ...@@ -40,7 +40,8 @@ type Collector interface {
// Collector may yield any Metric it sees fit in its Collect method. // Collector may yield any Metric it sees fit in its Collect method.
// //
// This method idempotently sends the same descriptors throughout the // This method idempotently sends the same descriptors throughout the
// lifetime of the Collector. // lifetime of the Collector. It may be called concurrently and
// therefore must be implemented in a concurrency safe way.
// //
// If a Collector encounters an error while executing this method, it // If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to // must send an invalid descriptor (created with NewInvalidDesc) to
...@@ -61,6 +62,39 @@ type Collector interface { ...@@ -61,6 +62,39 @@ type Collector interface {
Collect(chan<- Metric) Collect(chan<- Metric)
} }
// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
// func (c customCollector) Describe(ch chan<- *Desc) {
// DescribeByCollect(c, ch)
// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
metrics := make(chan Metric)
go func() {
c.Collect(metrics)
close(metrics)
}()
for m := range metrics {
descs <- m.Desc()
}
}
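For illustration (not part of this diff), a minimal sketch of a custom collector that uses the new DescribeByCollect helper for its Describe method; the collector, metric name, and value are hypothetical:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // queueCollector is a hypothetical collector exposing a single gauge.
    type queueCollector struct {
        queueLen *prometheus.Desc
    }

    func newQueueCollector() *queueCollector {
        return &queueCollector{
            queueLen: prometheus.NewDesc(
                "myapp_queue_length", // hypothetical metric name
                "Current length of the work queue.",
                nil, nil,
            ),
        }
    }

    // Describe delegates to DescribeByCollect, which is valid here because
    // the set of collected metrics never changes over the collector's lifetime.
    func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
        prometheus.DescribeByCollect(c, ch)
    }

    // Collect emits the current queue length as a const gauge (42 is a placeholder).
    func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
        ch <- prometheus.MustNewConstMetric(c.queueLen, prometheus.GaugeValue, 42)
    }

    func main() {
        prometheus.MustRegister(newQueueCollector())
    }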
// selfCollector implements Collector for a single Metric so that the Metric // selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements // collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument. // Metric, and call init with the Metric itself as an argument.
......
...@@ -67,7 +67,7 @@ type Desc struct { ...@@ -67,7 +67,7 @@ type Desc struct {
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can // and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName and help must not be empty. // be nil if no such labels should be set. fqName must not be empty.
// //
// variableLabels only contain the label names. Their label values are variable // variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.) // and therefore not part of the Desc. (They are managed within the Metric.)
...@@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * ...@@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
help: help, help: help,
variableLabels: variableLabels, variableLabels: variableLabels,
} }
if help == "" {
d.err = errors.New("empty help string")
return d
}
if !model.IsValidMetricName(model.LabelValue(fqName)) { if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName) d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d return d
...@@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * ...@@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
Value: proto.String(v), Value: proto.String(v),
}) })
} }
sort.Sort(LabelPairSorter(d.constLabelPairs)) sort.Sort(labelPairSorter(d.constLabelPairs))
return d return d
} }
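With the empty-help check removed above, NewDesc no longer records an error for an empty Help string, although setting one is still encouraged (see the Opts changes later in this commit). A minimal sketch with hypothetical names and labels:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // Help may now be empty without invalidating the Desc, but setting it
        // is still strongly encouraged.
        buildInfo := prometheus.NewDesc(
            "myapp_build_info", // hypothetical fully-qualified name
            "Build information about the binary.",
            nil,                                   // no variable labels
            prometheus.Labels{"version": "1.2.3"}, // hypothetical const label
        )
        fmt.Println(buildInfo)
    }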
......
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a. // Inline and byte-free variant of hash/fnv's fnv64a.
......
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus package prometheus
import ( import (
......
...@@ -16,7 +16,9 @@ package prometheus ...@@ -16,7 +16,9 @@ package prometheus
import ( import (
"fmt" "fmt"
"math" "math"
"runtime"
"sort" "sort"
"sync"
"sync/atomic" "sync/atomic"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
...@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { ...@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
} }
// HistogramOpts bundles the options for creating a Histogram metric. It is // HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are // mandatory to set Name to a non-empty string. All other fields are optional
// optional and can safely be left at their zero value. // and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct { type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified // Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with // name of the Histogram (created by joining these components with
...@@ -120,7 +123,7 @@ type HistogramOpts struct { ...@@ -120,7 +123,7 @@ type HistogramOpts struct {
Subsystem string Subsystem string
Name string Name string
// Help provides information about this Histogram. Mandatory! // Help provides information about this Histogram.
// //
// Metrics with the same fully-qualified name must have the same Help // Metrics with the same fully-qualified name must have the same Help
// string. // string.
...@@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr ...@@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
desc: desc, desc: desc,
upperBounds: opts.Buckets, upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
} }
for i, upperBound := range h.upperBounds { for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 { if i < len(h.upperBounds)-1 {
...@@ -200,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr ...@@ -200,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
} }
} }
} }
// Finally we know the final length of h.upperBounds and can make counts. // Finally we know the final length of h.upperBounds and can make counts
h.counts = make([]uint64, len(h.upperBounds)) // for both states:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.init(h) // Init self-collection. h.init(h) // Init self-collection.
return h return h
} }
type histogram struct { type histogramCounts struct {
// sumBits contains the bits of the float64 representing the sum of all // sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to // observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations. // guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64 sumBits uint64
count uint64 count uint64
buckets []uint64
}
selfCollector type histogram struct {
// Note that there is no mutex required. // countAndHotIdx is a complicated one. For lock-free yet atomic
// observations, we need to save the total count of observations again,
// combined with the index of the currently-hot counts struct, so that
// we can perform the operation on both values atomically. The least
// significant bit defines the hot counts struct. The remaining 63 bits
// represent the total count of observations. This happens under the
// assumption that the 63bit count will never overflow. Rationale: An
// observation takes about 30ns. Let's assume it could happen in
// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
// which is about 3000 years.
//
// This has to be first in the struct for 64bit alignment. See
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
selfCollector
desc *Desc desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
upperBounds []float64 upperBounds []float64
counts []uint64
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
// pointers to guarantee 64bit alignment of the histogramCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
hotIdx int // Index of currently-hot counts. Only used within Write.
labelPairs []*dto.LabelPair labelPairs []*dto.LabelPair
} }
...@@ -241,36 +270,113 @@ func (h *histogram) Observe(v float64) { ...@@ -241,36 +270,113 @@ func (h *histogram) Observe(v float64) {
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v) i := sort.SearchFloat64s(h.upperBounds, v)
if i < len(h.counts) {
atomic.AddUint64(&h.counts[i], 1) // We increment h.countAndHotIdx by 2 so that the counter in the upper
// 63 bits gets incremented by 1. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 2)
hotCounts := h.counts[n%2]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
} }
atomic.AddUint64(&h.count, 1)
for { for {
oldBits := atomic.LoadUint64(&h.sumBits) oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v) newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break break
} }
} }
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
} }
func (h *histogram) Write(out *dto.Metric) error { func (h *histogram) Write(out *dto.Metric) error {
his := &dto.Histogram{} var (
buckets := make([]*dto.Bucket, len(h.upperBounds)) his = &dto.Histogram{}
buckets = make([]*dto.Bucket, len(h.upperBounds))
hotCounts, coldCounts *histogramCounts
count uint64
)
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) // For simplicity, we mutex the rest of this method. It is not in the
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) // hot path, i.e. Observe is called much more often than Write. The
var count uint64 // complication of making Write lock-free isn't worth it.
h.writeMtx.Lock()
defer h.writeMtx.Unlock()
// This is a bit arcane, which is why the following spells out this if
// clause in English:
//
// If the currently-hot counts struct is #0, we atomically increment
// h.countAndHotIdx by 1 so that from now on Observe will use the counts
// struct #1. Furthermore, the atomic increment gives us the new value,
// which, in its most significant 63 bits, tells us the count of
// observations done so far up to and including currently ongoing
// observations still using the counts struct just changed from hot to
// cold. To have a normal uint64 for the count, we bitshift by 1 and
// save the result in count. We also set h.hotIdx to 1 for the next
// Write call, and we will refer to counts #1 as hotCounts and to counts
// #0 as coldCounts.
//
// If the currently-hot counts struct is #1, we do the corresponding
// things the other way round. We have to _decrement_ h.countAndHotIdx
// (which is a bit arcane in itself, as we have to express -1 with an
// unsigned int...).
if h.hotIdx == 0 {
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
h.hotIdx = 1
hotCounts = h.counts[1]
coldCounts = h.counts[0]
} else {
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
h.hotIdx = 0
hotCounts = h.counts[0]
coldCounts = h.counts[1]
}
// Now we have to wait for the now-declared-cold counts to actually cool
// down, i.e. wait for all observations still using it to finish. That's
// the case once the count in the cold counts struct is the same as the
// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
for {
if count == atomic.LoadUint64(&coldCounts.count) {
break
}
runtime.Gosched() // Let observations get work done.
}
his.SampleCount = proto.Uint64(count)
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
var cumCount uint64
for i, upperBound := range h.upperBounds { for i, upperBound := range h.upperBounds {
count += atomic.LoadUint64(&h.counts[i]) cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
buckets[i] = &dto.Bucket{ buckets[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(count), CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound), UpperBound: proto.Float64(upperBound),
} }
} }
his.Bucket = buckets his.Bucket = buckets
out.Histogram = his out.Histogram = his
out.Label = h.labelPairs out.Label = h.labelPairs
// Finally add all the cold counts to the new hot counts and reset the cold counts.
atomic.AddUint64(&hotCounts.count, count)
atomic.StoreUint64(&coldCounts.count, 0)
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
atomic.StoreUint64(&coldCounts.sumBits, 0)
break
}
}
for i := range h.upperBounds {
atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
atomic.StoreUint64(&coldCounts.buckets[i], 0)
}
return nil return nil
} }
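The rewrite above leaves the public Histogram API unchanged; Observe is now lock-free thanks to the hot/cold counts swap, and only Write takes a mutex. For illustration (not part of this diff), typical usage with explicit buckets, using hypothetical names:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // requestDuration is a hypothetical histogram; ExponentialBuckets(0.001, 2, 10)
        // yields 10 buckets starting at 1ms, doubling each time.
        requestDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:    "myapp_request_duration_seconds", // hypothetical
            Help:    "Request duration in seconds.",
            Buckets: prometheus.ExponentialBuckets(0.001, 2, 10),
        })
        prometheus.MustRegister(requestDuration)

        // Observe is safe for concurrent use; with this change it no longer
        // contends with concurrent Write calls on shared counters.
        requestDuration.Observe(0.042)
    }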
...@@ -454,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { ...@@ -454,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// bucket. // bucket.
// //
// NewConstHistogram returns an error if the length of labelValues is not // NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc. // consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram( func NewConstHistogram(
desc *Desc, desc *Desc,
count uint64, count uint64,
...@@ -462,6 +568,9 @@ func NewConstHistogram( ...@@ -462,6 +568,9 @@ func NewConstHistogram(
buckets map[float64]uint64, buckets map[float64]uint64,
labelValues ...string, labelValues ...string,
) (Metric, error) { ) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err return nil, err
} }
......
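The desc.err check added to NewConstHistogram above also guards const histograms built on the fly in a Collect method. A minimal sketch (all values hypothetical, pre-aggregated data as one might mirror from another system):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        desc := prometheus.NewDesc(
            "myapp_task_duration_seconds", // hypothetical
            "Task duration mirrored from an external system.",
            nil, nil,
        )
        // count, sum, and the cumulative bucket counts are hypothetical values.
        h := prometheus.MustNewConstHistogram(
            desc,
            120,  // total observation count
            42.5, // sum of all observations
            map[float64]uint64{0.1: 30, 0.5: 90, 1: 115},
        )
        fmt.Println(h.Desc())
    }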
...@@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) { ...@@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) {
// name). // name).
// //
// Deprecated: Please note the issues described in the doc comment of // Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using // InstrumentHandler. You might want to consider using promhttp.Handler instead.
// promhttp.InstrumentedHandler instead.
func Handler() http.Handler { func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler()) return InstrumentHandler("prometheus", UninstrumentedHandler())
} }
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
// //
// Deprecated: Use promhttp.Handler instead. See there for further documentation. // Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
// instead. See there for further documentation.
func UninstrumentedHandler() http.Handler { func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather() mfs, err := DefaultGatherer.Gather()
...@@ -149,21 +149,14 @@ var now nower = nowFunc(func() time.Time { ...@@ -149,21 +149,14 @@ var now nower = nowFunc(func() time.Time {
// (label name "method") and HTTP status code (label name "code"). // (label name "method") and HTTP status code (label name "code").
// //
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in // Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following: // package promhttp instead. The issues are the following: (1) It uses Summaries
// // rather than Histograms. Summaries are not useful if aggregation across
// - It uses Summaries rather than Histograms. Summaries are not useful if // multiple instances is required. (2) It uses microseconds as unit, which is
// aggregation across multiple instances is required. // deprecated and should be replaced by seconds. (3) The size of the request is
// // calculated in a separate goroutine. Since this calculator requires access to
// - It uses microseconds as unit, which is deprecated and should be replaced by // the request header, it creates a race with any writes to the header performed
// seconds. // during request handling. httputil.ReverseProxy is a prominent example for a
// // handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// - It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272. // https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
...@@ -307,7 +300,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo ...@@ -307,7 +300,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
} }
func computeApproximateRequestSize(r *http.Request) <-chan int { func computeApproximateRequestSize(r *http.Request) <-chan int {
// Get URL length in current go routine for avoiding a race condition. // Get URL length in current goroutine for avoiding a race condition.
// HandlerFunc that runs in parallel may modify the URL. // HandlerFunc that runs in parallel may modify the URL.
s := 0 s := 0
if r.URL != nil { if r.URL != nil {
......
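The deprecation notes above steer users away from prometheus.Handler and InstrumentHandler toward package promhttp. A minimal sketch (not part of this diff) of serving the default registry with promhttp; the port is arbitrary:

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // promhttp.Handler() serves prometheus.DefaultGatherer; for custom
        // options use promhttp.HandlerFor(prometheus.DefaultGatherer,
        // promhttp.HandlerOpts{}) as the doc comment above recommends.
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":8080", nil))
    }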
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"sort"
dto "github.com/prometheus/client_model/go"
)
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// NormalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
}
return result
}
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus package prometheus
import ( import (
......
...@@ -15,6 +15,9 @@ package prometheus ...@@ -15,6 +15,9 @@ package prometheus
import ( import (
"strings" "strings"
"time"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
...@@ -43,9 +46,8 @@ type Metric interface { ...@@ -43,9 +46,8 @@ type Metric interface {
// While populating dto.Metric, it is the responsibility of the // While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid // implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is // UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. (Implementers may find // recommended to sort labels lexicographically. Callers of Write should
// LabelPairSorter useful for that.) Callers of Write should still make // still make sure of sorting if they depend on it.
// sure of sorting if they depend on it.
Write(*dto.Metric) error Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated // TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The // dto.Metric protobuf to save allocations has disappeared. The
...@@ -57,8 +59,9 @@ type Metric interface { ...@@ -57,8 +59,9 @@ type Metric interface {
// implementation XXX has its own XXXOpts type, but in most cases, it is just // implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.) // an alias of this type (which might change when the requirement arises.)
// //
// It is mandatory to set Name and Help to a non-empty string. All other fields // It is mandatory to set Name to a non-empty string. All other fields are
// are optional and can safely be left at their zero value. // optional and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type Opts struct { type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified // Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with // name of the Metric (created by joining these components with
...@@ -69,7 +72,7 @@ type Opts struct { ...@@ -69,7 +72,7 @@ type Opts struct {
Subsystem string Subsystem string
Name string Name string
// Help provides information about this metric. Mandatory! // Help provides information about this metric.
// //
// Metrics with the same fully-qualified name must have the same Help // Metrics with the same fully-qualified name must have the same Help
// string. // string.
...@@ -110,20 +113,19 @@ func BuildFQName(namespace, subsystem, name string) string { ...@@ -110,20 +113,19 @@ func BuildFQName(namespace, subsystem, name string) string {
return name return name
} }
// LabelPairSorter implements sort.Interface. It is used to sort a slice of // labelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers. This is useful for implementing the Write method of // dto.LabelPair pointers.
// custom metrics. type labelPairSorter []*dto.LabelPair
type LabelPairSorter []*dto.LabelPair
func (s LabelPairSorter) Len() int { func (s labelPairSorter) Len() int {
return len(s) return len(s)
} }
func (s LabelPairSorter) Swap(i, j int) { func (s labelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i] s[i], s[j] = s[j], s[i]
} }
func (s LabelPairSorter) Less(i, j int) bool { func (s labelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName() return s[i].GetName() < s[j].GetName()
} }
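Because LabelPairSorter is unexported here (now labelPairSorter), code outside the package that relied on it, for example to sort labels in a custom Write implementation as the old Metric doc comment suggested, can switch to sort.Slice. A minimal sketch with hypothetical label pairs:

    package main

    import (
        "fmt"
        "sort"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        pairs := []*dto.LabelPair{
            {Name: proto.String("zone"), Value: proto.String("b")},
            {Name: proto.String("app"), Value: proto.String("demo")},
        }
        // Equivalent of the removed exported LabelPairSorter: sort
        // lexicographically by label name.
        sort.Slice(pairs, func(i, j int) bool {
            return pairs[i].GetName() < pairs[j].GetName()
        })
        fmt.Println(pairs[0].GetName()) // "app"
    }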
...@@ -142,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric { ...@@ -142,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err } func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
type timestampedMetric struct {
Metric
t time.Time
}
func (m timestampedMetric) Write(pb *dto.Metric) error {
e := m.Metric.Write(pb)
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
return e
}
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
// way that it has an explicit timestamp set to the provided Time. This is only
// useful in rare cases as the timestamp of a Prometheus metric should usually
// be set by the Prometheus server during scraping. Exceptions include mirroring
// metrics with given timestamps from other metric
// sources.
//
// NewMetricWithTimestamp works best with MustNewConstMetric,
// MustNewConstHistogram, and MustNewConstSummary, see example.
//
// Currently, the exposition formats used by Prometheus are limited to
// millisecond resolution. Thus, the provided time will be rounded down to the
// next full millisecond value.
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
return timestampedMetric{Metric: m, t: t}
}
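For illustration (not part of this diff), wrapping a const metric with an explicit timestamp, as one might when mirroring samples from another system; the metric name, value, and sample time are hypothetical:

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        desc := prometheus.NewDesc(
            "mirrored_temperature_celsius", // hypothetical
            "Temperature mirrored from an external source.",
            nil, nil,
        )
        // The timestamp should normally be set by the Prometheus server; an
        // explicit one only makes sense for mirrored data like this.
        m := prometheus.NewMetricWithTimestamp(
            time.Date(2018, 10, 15, 12, 0, 0, 0, time.UTC), // hypothetical sample time
            prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 21.5),
        )
        fmt.Println(m.Desc())
    }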
...@@ -13,45 +13,74 @@ ...@@ -13,45 +13,74 @@
package prometheus package prometheus
import "github.com/prometheus/procfs" import (
"errors"
"os"
"github.com/prometheus/procfs"
)
type processCollector struct { type processCollector struct {
collectFn func(chan<- Metric) collectFn func(chan<- Metric)
pidFn func() (int, error) pidFn func() (int, error)
reportErrors bool
cpuTotal *Desc cpuTotal *Desc
openFDs, maxFDs *Desc openFDs, maxFDs *Desc
vsize, rss *Desc vsize, maxVsize *Desc
rss *Desc
startTime *Desc startTime *Desc
} }
// ProcessCollectorOpts defines the behavior of a process metrics collector
// created with NewProcessCollector.
type ProcessCollectorOpts struct {
// PidFn returns the PID of the process the collector collects metrics
// for. It is called upon each collection. By default, the PID of the
// current process is used, as determined on construction time by
// calling os.Getpid().
PidFn func() (int, error)
// If non-empty, each of the collected metrics is prefixed by the
// provided string and an underscore ("_").
Namespace string
// If true, any error encountered during collection is reported as an
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
// and the collected metrics will be incomplete. (Possibly, no metrics
// will be collected at all.) While that's usually not desired, it is
// appropriate for the common "mix-in" of process metrics, where process
// metrics are nice to have, but failing to collect them should not
// disrupt the collection of the remaining metrics.
ReportErrors bool
}
// NewProcessCollector returns a collector which exports the current state of // NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as // process metrics including CPU, memory and file descriptor usage as well as
// the process start time for the given process ID under the given namespace. // the process start time. The detailed behavior is defined by the provided
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
// collector for the current process with an empty namespace string and no error
// reporting.
// //
// Currently, the collector depends on a Linux-style proc filesystem and // Currently, the collector depends on a Linux-style proc filesystem and
// therefore only exports metrics for Linux. // therefore only exports metrics for Linux.
func NewProcessCollector(pid int, namespace string) Collector { //
return NewProcessCollectorPIDFn( // Note: An older version of this function had the following signature:
func() (int, error) { return pid, nil }, //
namespace, // NewProcessCollector(pid int, namespace string) Collector
) //
} // Most commonly, it was called as
//
// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is // NewProcessCollector(os.Getpid(), "")
// determined on each collect anew by calling the given pidFn function. //
func NewProcessCollectorPIDFn( // The following call of the current version is equivalent to the above:
pidFn func() (int, error), //
namespace string, // NewProcessCollector(ProcessCollectorOpts{})
) Collector { func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := "" ns := ""
if len(namespace) > 0 { if len(opts.Namespace) > 0 {
ns = namespace + "_" ns = opts.Namespace + "_"
} }
c := processCollector{ c := &processCollector{
pidFn: pidFn, reportErrors: opts.ReportErrors,
collectFn: func(chan<- Metric) {},
cpuTotal: NewDesc( cpuTotal: NewDesc(
ns+"process_cpu_seconds_total", ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.", "Total user and system CPU time spent in seconds.",
...@@ -72,6 +101,11 @@ func NewProcessCollectorPIDFn( ...@@ -72,6 +101,11 @@ func NewProcessCollectorPIDFn(
"Virtual memory size in bytes.", "Virtual memory size in bytes.",
nil, nil, nil, nil,
), ),
maxVsize: NewDesc(
ns+"process_virtual_memory_max_bytes",
"Maximum amount of virtual memory available in bytes.",
nil, nil,
),
rss: NewDesc( rss: NewDesc(
ns+"process_resident_memory_bytes", ns+"process_resident_memory_bytes",
"Resident memory size in bytes.", "Resident memory size in bytes.",
...@@ -84,12 +118,23 @@ func NewProcessCollectorPIDFn( ...@@ -84,12 +118,23 @@ func NewProcessCollectorPIDFn(
), ),
} }
if opts.PidFn == nil {
pid := os.Getpid()
c.pidFn = func() (int, error) { return pid, nil }
} else {
c.pidFn = opts.PidFn
}
// Set up process metric collection if supported by the runtime. // Set up process metric collection if supported by the runtime.
if _, err := procfs.NewStat(); err == nil { if _, err := procfs.NewStat(); err == nil {
c.collectFn = c.processCollect c.collectFn = c.processCollect
} else {
c.collectFn = func(ch chan<- Metric) {
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
}
} }
return &c return c
} }
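For illustration, the migration described in the doc comment above: the old NewProcessCollector(os.Getpid(), "") call becomes a call with ProcessCollectorOpts. The custom registry below is used only to keep the sketch self-contained, since the default registry already registers a process collector in init:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // The zero value of ProcessCollectorOpts matches the old
        // NewProcessCollector(os.Getpid(), "") behavior: current PID, no
        // namespace prefix, collection errors ignored.
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
            ReportErrors: true, // optionally report failures as invalid metrics
        }))
    }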
// Describe returns all descriptions of the collector. // Describe returns all descriptions of the collector.
...@@ -98,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ...@@ -98,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.openFDs ch <- c.openFDs
ch <- c.maxFDs ch <- c.maxFDs
ch <- c.vsize ch <- c.vsize
ch <- c.maxVsize
ch <- c.rss ch <- c.rss
ch <- c.startTime ch <- c.startTime
} }
...@@ -107,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) { ...@@ -107,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch) c.collectFn(ch)
} }
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) { func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn() pid, err := c.pidFn()
if err != nil { if err != nil {
c.reportError(ch, nil, err)
return return
} }
p, err := procfs.NewProc(pid) p, err := procfs.NewProc(pid)
if err != nil { if err != nil {
c.reportError(ch, nil, err)
return return
} }
...@@ -126,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ...@@ -126,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil { if startTime, err := stat.StartTime(); err == nil {
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
} else {
c.reportError(ch, c.startTime, err)
} }
} else {
c.reportError(ch, nil, err)
} }
if fds, err := p.FileDescriptorsLen(); err == nil { if fds, err := p.FileDescriptorsLen(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
} else {
c.reportError(ch, c.openFDs, err)
} }
if limits, err := p.NewLimits(); err == nil { if limits, err := p.NewLimits(); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
} else {
c.reportError(ch, nil, err)
}
}
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
if !c.reportErrors {
return
}
if desc == nil {
desc = NewInvalidDesc(err)
} }
ch <- NewInvalidMetric(desc, err)
} }
...@@ -16,7 +16,6 @@ package prometheus ...@@ -16,7 +16,6 @@ package prometheus
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"os"
"runtime" "runtime"
"sort" "sort"
"strings" "strings"
...@@ -26,6 +25,8 @@ import ( ...@@ -26,6 +25,8 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
) )
const ( const (
...@@ -52,7 +53,7 @@ var ( ...@@ -52,7 +53,7 @@ var (
) )
func init() { func init() {
MustRegister(NewProcessCollector(os.Getpid(), "")) MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
MustRegister(NewGoCollector()) MustRegister(NewGoCollector())
} }
...@@ -106,9 +107,6 @@ type Registerer interface { ...@@ -106,9 +107,6 @@ type Registerer interface {
// Collector, and for providing a Collector that will not cause // Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape // inconsistent metrics on collection. (This would lead to scrape
// errors.) // errors.)
//
// It is in general not safe to register the same Collector multiple
// times concurrently.
Register(Collector) error Register(Collector) error
// MustRegister works like Register but registers any number of // MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an // Collectors and panics upon the first registration that causes an
...@@ -272,7 +270,12 @@ func (r *Registry) Register(c Collector) error { ...@@ -272,7 +270,12 @@ func (r *Registry) Register(c Collector) error {
close(descChan) close(descChan)
}() }()
r.mtx.Lock() r.mtx.Lock()
defer r.mtx.Unlock() defer func() {
// Drain channel in case of premature return to not leak a goroutine.
for range descChan {
}
r.mtx.Unlock()
}()
// Conduct various tests... // Conduct various tests...
for desc := range descChan { for desc := range descChan {
...@@ -527,7 +530,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { ...@@ -527,7 +530,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
break break
} }
} }
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
} }
// processMetric is an internal helper method only used by the Gather method. // processMetric is an internal helper method only used by the Gather method.
...@@ -538,6 +541,11 @@ func processMetric( ...@@ -538,6 +541,11 @@ func processMetric(
registeredDescIDs map[uint64]struct{}, registeredDescIDs map[uint64]struct{},
) error { ) error {
desc := metric.Desc() desc := metric.Desc()
// Wrapped metrics collected by an unchecked Collector can have an
// invalid Desc.
if desc.err != nil {
return desc.err
}
dtoMetric := &dto.Metric{} dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil { if err := metric.Write(dtoMetric); err != nil {
return fmt.Errorf("error collecting metric %v: %s", desc, err) return fmt.Errorf("error collecting metric %v: %s", desc, err)
...@@ -707,72 +715,7 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { ...@@ -707,72 +715,7 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
} }
} }
} }
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// normalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
}
return result
} }
// checkSuffixCollisions checks for collisions with the “magic” suffixes the // checkSuffixCollisions checks for collisions with the “magic” suffixes the
...@@ -844,6 +787,8 @@ func checkMetricConsistency( ...@@ -844,6 +787,8 @@ func checkMetricConsistency(
dtoMetric *dto.Metric, dtoMetric *dto.Metric,
metricHashes map[uint64]struct{}, metricHashes map[uint64]struct{},
) error { ) error {
name := metricFamily.GetName()
// Type consistency with metric family. // Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
...@@ -852,37 +797,46 @@ func checkMetricConsistency( ...@@ -852,37 +797,46 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q { %s} is not a %s", "collected metric %q { %s} is not a %s",
metricFamily.GetName(), dtoMetric, metricFamily.GetType(), name, dtoMetric, metricFamily.GetType(),
) )
} }
previousLabelName := ""
for _, labelPair := range dtoMetric.GetLabel() { for _, labelPair := range dtoMetric.GetLabel() {
if !checkLabelName(labelPair.GetName()) { labelName := labelPair.GetName()
if labelName == previousLabelName {
return fmt.Errorf(
"collected metric %q { %s} has two or more labels with the same name: %s",
name, dtoMetric, labelName,
)
}
if !checkLabelName(labelName) {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s", "collected metric %q { %s} has a label with an invalid name: %s",
metricFamily.GetName(), dtoMetric, labelPair.GetName(), name, dtoMetric, labelName,
) )
} }
if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel { if dtoMetric.Summary != nil && labelName == quantileLabel {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q { %s} must not have an explicit %q label", "collected metric %q { %s} must not have an explicit %q label",
metricFamily.GetName(), dtoMetric, quantileLabel, name, dtoMetric, quantileLabel,
) )
} }
if !utf8.ValidString(labelPair.GetValue()) { if !utf8.ValidString(labelPair.GetValue()) {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v", "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue()) name, dtoMetric, labelName, labelPair.GetValue())
} }
previousLabelName = labelName
} }
// Is the metric unique (i.e. no other metric with the same name and the same labels)? // Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew() h := hashNew()
h = hashAdd(h, metricFamily.GetName()) h = hashAdd(h, name)
h = hashAddByte(h, separatorByte) h = hashAddByte(h, separatorByte)
// Make sure label pairs are sorted. We depend on it for the consistency // Make sure label pairs are sorted. We depend on it for the consistency
// check. // check.
sort.Sort(LabelPairSorter(dtoMetric.Label)) sort.Sort(labelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label { for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetName()) h = hashAdd(h, lp.GetName())
h = hashAddByte(h, separatorByte) h = hashAddByte(h, separatorByte)
...@@ -892,7 +846,7 @@ func checkMetricConsistency( ...@@ -892,7 +846,7 @@ func checkMetricConsistency(
if _, exists := metricHashes[h]; exists { if _, exists := metricHashes[h]; exists {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q { %s} was collected before with the same name and label values", "collected metric %q { %s} was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric, name, dtoMetric,
) )
} }
metricHashes[h] = struct{}{} metricHashes[h] = struct{}{}
...@@ -926,7 +880,7 @@ func checkDescConsistency( ...@@ -926,7 +880,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc, metricFamily.GetName(), dtoMetric, desc,
) )
} }
sort.Sort(LabelPairSorter(lpsFromDesc)) sort.Sort(labelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc { for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i] lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() || if lpFromDesc.GetName() != lpFromMetric.GetName() ||
......
...@@ -37,7 +37,7 @@ const quantileLabel = "quantile" ...@@ -37,7 +37,7 @@ const quantileLabel = "quantile"
// A typical use-case is the observation of request latencies. By default, a // A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency // Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. However, the default behavior will change in the // as rank estimations. However, the default behavior will change in the
// upcoming v0.10 of the library. There will be no rank estiamtions at all by // upcoming v0.10 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank // default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly. // estimations explicitly.
// //
...@@ -81,10 +81,10 @@ const ( ...@@ -81,10 +81,10 @@ const (
) )
// SummaryOpts bundles the options for creating a Summary metric. It is // SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. While all other fields // mandatory to set Name to a non-empty string. While all other fields are
// are optional and can safely be left at their zero value, it is recommended to // optional and can safely be left at their zero value, it is recommended to set
// explicitly set the Objectives field to the desired value as the default value // a help string and to explicitly set the Objectives field to the desired value
// will change in the upcoming v0.10 of the library. // as the default value will change in the upcoming v0.10 of the library.
type SummaryOpts struct { type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified // Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with // name of the Summary (created by joining these components with
...@@ -95,7 +95,7 @@ type SummaryOpts struct { ...@@ -95,7 +95,7 @@ type SummaryOpts struct {
Subsystem string Subsystem string
Name string Name string
// Help provides information about this Summary. Mandatory! // Help provides information about this Summary.
// //
// Metrics with the same fully-qualified name must have the same Help // Metrics with the same fully-qualified name must have the same Help
// string. // string.
...@@ -586,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error { ...@@ -586,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
// map[float64]float64{0.5: 0.23, 0.99: 0.56} // map[float64]float64{0.5: 0.23, 0.99: 0.56}
// //
// NewConstSummary returns an error if the length of labelValues is not // NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc. // consistent with the variable labels in Desc or if Desc is invalid.
func NewConstSummary( func NewConstSummary(
desc *Desc, desc *Desc,
count uint64, count uint64,
...@@ -594,6 +594,9 @@ func NewConstSummary( ...@@ -594,6 +594,9 @@ func NewConstSummary(
quantiles map[float64]float64, quantiles map[float64]float64,
labelValues ...string, labelValues ...string,
) (Metric, error) { ) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err return nil, err
} }
......
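The SummaryOpts changes above recommend setting Objectives explicitly because the default will change in v0.10. A minimal sketch with a hypothetical metric; the objectives spelled out here correspond to the library's historical default:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        latency := prometheus.NewSummary(prometheus.SummaryOpts{
            Name: "myapp_request_latency_seconds", // hypothetical
            Help: "Request latency in seconds.",
            // Spell out the desired rank estimations; these values mirror the
            // historical default, which goes away in v0.10.
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        })
        prometheus.MustRegister(latency)
        latency.Observe(0.23)
    }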
...@@ -17,9 +17,9 @@ import ( ...@@ -17,9 +17,9 @@ import (
"fmt" "fmt"
"sort" "sort"
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
) )
// ValueType is an enumeration of metric types that represent a simple value. // ValueType is an enumeration of metric types that represent a simple value.
...@@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error { ...@@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// operations. However, when implementing custom Collectors, it is useful as a // operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in // throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of // the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc. // labelValues is not consistent with the variable labels in Desc or if Desc is
// invalid.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err return nil, err
} }
...@@ -153,6 +157,6 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { ...@@ -153,6 +157,6 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
}) })
} }
labelPairs = append(labelPairs, desc.constLabelPairs...) labelPairs = append(labelPairs, desc.constLabelPairs...)
sort.Sort(LabelPairSorter(labelPairs)) sort.Sort(labelPairSorter(labelPairs))
return labelPairs return labelPairs
} }
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"sort"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
// WrapRegistererWith returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
//
// The Collector example demonstrates a use of WrapRegistererWith.
func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
return &wrappingRegisterer{
wrappedRegisterer: reg,
labels: labels,
}
}
// WrapRegistererWithPrefix returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided prefix to the name of all Metrics it collects.
//
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
// a sub-system. To make this work, register metrics of the sub-system with the
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
// to use the same prefix for all metrics exposed. In particular, do not prefix
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
// fact, those metrics are already prefixed with “go_” or “process_”,
// respectively.)
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
return &wrappingRegisterer{
wrappedRegisterer: reg,
prefix: prefix,
}
}
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string
labels Labels
}
func (r *wrappingRegisterer) Register(c Collector) error {
return r.wrappedRegisterer.Register(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
labels: r.labels,
})
}
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
for _, c := range cs {
if err := r.Register(c); err != nil {
panic(err)
}
}
}
func (r *wrappingRegisterer) Unregister(c Collector) bool {
return r.wrappedRegisterer.Unregister(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
labels: r.labels,
})
}
type wrappingCollector struct {
wrappedCollector Collector
prefix string
labels Labels
}
func (c *wrappingCollector) Collect(ch chan<- Metric) {
wrappedCh := make(chan Metric)
go func() {
c.wrappedCollector.Collect(wrappedCh)
close(wrappedCh)
}()
for m := range wrappedCh {
ch <- &wrappingMetric{
wrappedMetric: m,
prefix: c.prefix,
labels: c.labels,
}
}
}
func (c *wrappingCollector) Describe(ch chan<- *Desc) {
wrappedCh := make(chan *Desc)
go func() {
c.wrappedCollector.Describe(wrappedCh)
close(wrappedCh)
}()
for desc := range wrappedCh {
ch <- wrapDesc(desc, c.prefix, c.labels)
}
}
type wrappingMetric struct {
wrappedMetric Metric
prefix string
labels Labels
}
func (m *wrappingMetric) Desc() *Desc {
return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
}
func (m *wrappingMetric) Write(out *dto.Metric) error {
if err := m.wrappedMetric.Write(out); err != nil {
return err
}
if len(m.labels) == 0 {
// No wrapping labels.
return nil
}
for ln, lv := range m.labels {
out.Label = append(out.Label, &dto.LabelPair{
Name: proto.String(ln),
Value: proto.String(lv),
})
}
sort.Sort(labelPairSorter(out.Label))
return nil
}
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels := Labels{}
for _, lp := range desc.constLabelPairs {
constLabels[*lp.Name] = *lp.Value
}
for ln, lv := range labels {
if _, alreadyUsed := constLabels[ln]; alreadyUsed {
return &Desc{
fqName: desc.fqName,
help: desc.help,
variableLabels: desc.variableLabels,
constLabelPairs: desc.constLabelPairs,
err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
}
}
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
newDesc.err = desc.err
}
return newDesc
}
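For illustration (not part of this diff), using the new wrapping registerers to prefix and label all metrics of a sub-system; the registry, prefix, label, and counter names are hypothetical:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        reg := prometheus.NewRegistry()

        // Collectors registered through wrapped get their metric names
        // prefixed with "workerpool_" and gain a const label shard="0".
        wrapped := prometheus.WrapRegistererWith(
            prometheus.Labels{"shard": "0"}, // hypothetical fixed label
            prometheus.WrapRegistererWithPrefix("workerpool_", reg),
        )

        jobs := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "jobs_processed_total", // exposed as workerpool_jobs_processed_total
            Help: "Number of processed jobs.",
        })
        wrapped.MustRegister(jobs)
        jobs.Inc()
    }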
...@@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn { ...@@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
} }
return p.readingValue return p.readingValue
default: default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
return nil return nil
} }
} }
......
...@@ -161,16 +161,28 @@ ...@@ -161,16 +161,28 @@
"revisionTime": "2016-01-10T10:55:54Z" "revisionTime": "2016-01-10T10:55:54Z"
}, },
{ {
"checksumSHA1": "5IUpj+w/TVy3rj8oL3lFS0p7TEc=", "checksumSHA1": "frS661rlSEZWE9CezHhnFioQK/I=",
"path": "github.com/prometheus/client_golang/prometheus", "path": "github.com/prometheus/client_golang/prometheus",
"revision": "bcbbc08eb2ddff3af83bbf11e7ec13b4fd730b6e", "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
"revisionTime": "2018-07-13T20:10:52Z" "revisionTime": "2018-10-15T14:52:39Z",
"version": "v0.9",
"versionExact": "v0.9.0"
},
{
"checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=",
"path": "github.com/prometheus/client_golang/prometheus/internal",
"revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
"revisionTime": "2018-10-15T14:52:39Z",
"version": "v0.9",
"versionExact": "v0.9.0"
}, },
{ {
"checksumSHA1": "d5BiEvD8MrgpWQ6PQJUvawJsMak=", "checksumSHA1": "d5BiEvD8MrgpWQ6PQJUvawJsMak=",
"path": "github.com/prometheus/client_golang/prometheus/promhttp", "path": "github.com/prometheus/client_golang/prometheus/promhttp",
"revision": "bcbbc08eb2ddff3af83bbf11e7ec13b4fd730b6e", "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
"revisionTime": "2018-07-13T20:10:52Z" "revisionTime": "2018-10-15T14:52:39Z",
"version": "v0.9",
"versionExact": "v0.9.0"
}, },
{ {
"checksumSHA1": "V8xkqgmP66sq2ZW4QO5wi9a4oZE=", "checksumSHA1": "V8xkqgmP66sq2ZW4QO5wi9a4oZE=",
...@@ -179,22 +191,22 @@ ...@@ -179,22 +191,22 @@
"revisionTime": "2018-07-12T10:51:10Z" "revisionTime": "2018-07-12T10:51:10Z"
}, },
{ {
"checksumSHA1": "vPdC/DzEm7YbzRir2wwnpLPfay8=", "checksumSHA1": "hGf3xT6gRaJh2zAEbWj9YnV+K+0=",
"path": "github.com/prometheus/common/expfmt", "path": "github.com/prometheus/common/expfmt",
"revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470",
"revisionTime": "2018-05-18T15:47:59Z" "revisionTime": "2018-10-15T12:42:27Z"
}, },
{ {
"checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=", "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
"path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
"revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470",
"revisionTime": "2018-05-18T15:47:59Z" "revisionTime": "2018-10-15T12:42:27Z"
}, },
{ {
"checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=", "checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=",
"path": "github.com/prometheus/common/model", "path": "github.com/prometheus/common/model",
"revision": "7600349dcfe1abd18d72d3a1770870d9800a7801", "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470",
"revisionTime": "2018-05-18T15:47:59Z" "revisionTime": "2018-10-15T12:42:27Z"
}, },
{ {
"checksumSHA1": "jo/zxF+Pfj5yZjReTKGOACq9IBs=", "checksumSHA1": "jo/zxF+Pfj5yZjReTKGOACq9IBs=",
......