vendor: github.com/prometheus/client_golang v1.12.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2022-05-04 20:54:58 +02:00
parent 985711c1f4
commit ec47096efc
GPG key ID: 76698F39D527CE8C
574 changed files with 101,741 additions and 22,828 deletions


@@ -1 +1 @@
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).


@@ -0,0 +1,38 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "runtime/debug"
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}
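Since this constructor is deprecated, a minimal usage sketch of the replacement from the collectors package (registry setup is illustrative):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	// Register the build-info collector via the non-deprecated
	// collectors package instead of prometheus.NewBuildInfoCollector.
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewBuildInfoCollector())
}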


@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}


@@ -17,6 +17,7 @@ import (
"errors"
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
@@ -42,11 +43,27 @@ type Counter interface {
Add(float64)
}
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 64 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
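As a hedged sketch of the contract above (metric and label names are illustrative, assuming the usual prometheus import):

requests := prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_requests_total",
	Help: "Total example requests.",
})

// Counters from NewCounter are documented to implement ExemplarAdder,
// so the type assertion is safe. Passing nil labels would keep the
// current exemplar; empty Labels{} would create a label-less one.
if ea, ok := requests.(prometheus.ExemplarAdder); ok {
	ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
}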
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
// perform the corresponding type assertion.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
@@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
result.init(result) // Init self-collection.
return result
}
@@ -78,6 +95,9 @@ type counter struct {
desc *Desc
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (c *counter) Desc() *Desc {
@@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
ival := uint64(v)
if float64(ival) == v {
atomic.AddUint64(&c.valInt, ival)
@@ -103,16 +124,41 @@ func (c *counter) Add(v float64) {
}
}
func (c *counter) AddWithExemplar(v float64, e Labels) {
c.Add(v)
c.updateExemplar(v, e)
}
func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
func (c *counter) Write(out *dto.Metric) error {
func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
val := fval + float64(ival)
return fval + float64(ival)
}
return populateMetric(CounterValue, val, c.labelPairs, out)
func (c *counter) Write(out *dto.Metric) error {
val := c.get()
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
func (c *counter) updateExemplar(v float64, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, c.now(), l)
if err != nil {
panic(err)
}
c.exemplar.Store(e)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
@@ -121,7 +167,7 @@ func (c *counter) Write(out *dto.Metric) error {
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
*metricVec
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@@ -134,11 +180,11 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
return result
}),
@@ -146,7 +192,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
}
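A short usage sketch (metric and label names are illustrative):

httpReqs := prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests, partitioned by code and method.",
	},
	[]string{"code", "method"},
)
prometheus.MustRegister(httpReqs)

// Label values must be given in the same order as the label names.
httpReqs.WithLabelValues("200", "GET").Inc()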
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
@@ -160,7 +206,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -169,7 +215,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
@@ -177,19 +223,19 @@ func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
}
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
@@ -233,7 +279,7 @@ func (v *CounterVec) With(labels Labels) Counter {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &CounterVec{vec}, err
}
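A sketch of the currying pattern described above (handler and label names are illustrative):

ops := prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "example_ops_total", Help: "Operations."},
	[]string{"handler", "outcome"},
)

// MustCurryWith pins the "handler" label; the curried vector then only
// needs the remaining "outcome" label value.
loginOps := ops.MustCurryWith(prometheus.Labels{"handler": "login"})
loginOps.WithLabelValues("success").Inc()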
@@ -267,6 +313,8 @@ type CounterFunc interface {
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
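A hedged sketch of a CounterFunc backed by an atomic variable (assumes the usual prometheus and sync/atomic imports; names are illustrative):

var handled uint64 // incremented elsewhere via atomic.AddUint64

cf := prometheus.NewCounterFunc(
	prometheus.CounterOpts{
		Name: "example_handled_total",
		Help: "Requests handled, read from an atomic counter.",
	},
	// Called from within Write; must be concurrency-safe and monotonic.
	func() float64 { return float64(atomic.LoadUint64(&handled)) },
)
prometheus.MustRegister(cf)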


@@ -19,6 +19,8 @@ import (
"sort"
"strings"
"github.com/cespare/xxhash/v2"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@@ -49,7 +51,7 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric
// variableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This
@@ -126,24 +128,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
return d
}
vh := hashNew()
xxh := xxhash.New()
for _, val := range labelValues {
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
xxh.WriteString(val)
xxh.Write(separatorByteSlice)
}
d.id = vh
d.id = xxh.Sum64()
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
xxh.Reset()
xxh.WriteString(help)
xxh.Write(separatorByteSlice)
for _, labelName := range labelNames {
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
xxh.WriteString(labelName)
xxh.Write(separatorByteSlice)
}
d.dimHash = lh
d.dimHash = xxh.Sum64()
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
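For reference, a small sketch of constructing a Desc by hand (all names illustrative):

desc := prometheus.NewDesc(
	"example_temperature_celsius",      // fully-qualified metric name
	"Current temperature, per sensor.", // help string
	[]string{"sensor"},                 // variable label names
	prometheus.Labels{"site": "lab"},   // constant labels
)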


@@ -84,25 +84,21 @@
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// HistogramVec, and UntypedVec.
// and HistogramVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
// and HistogramVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
// UntypedOpts.
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
// Custom Collectors and constant Metrics
//
@@ -118,13 +114,16 @@
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
// NewDesc comes in handy to create those Desc instances. Alternatively, you
// could return no Desc at all, which will mark the Collector “unchecked”. No
// checks are performed at registration time, but metric consistency will still
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
// for all metric types with just a float64 as their value: Counter, Gauge, and
// a special “type” called Untyped. Use the latter if you are not sure if the
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
// happens in the Collect method. The Describe method has to return separate
// Desc instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
// you could return no Desc at all, which will mark the Collector “unchecked”.
// No checks are performed at registration time, but metric consistency will
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
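To make the pattern concrete, a minimal checked custom Collector along the lines described above (currentQueueDepth is a hypothetical source of the mirrored number):

type queueCollector struct {
	depthDesc *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc // returning Descs keeps the collector “checked”
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// Create the throw-away metric on the fly from the existing number.
	ch <- prometheus.MustNewConstMetric(
		c.depthDesc, prometheus.GaugeValue, currentQueueDepth())
}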


@@ -22,43 +22,10 @@ type expvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
// See there for documentation.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
// want to export as Prometheus metric, you need an entry in the exports
// map. The descriptor mapped to each key describes how to export the expvar
// value. It defines the name and the help string of the Prometheus metric
// proxying the expvar value. The type will always be Untyped.
//
// For descriptors without variable labels, the expvar value must be a number or
// a bool. The number is then directly exported as the Prometheus sample
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
// that are not numbers or bools are silently ignored.
//
// If the descriptor has one variable label, the expvar value must be an expvar
// map. The keys in the expvar map become the various values of the one
// Prometheus label. The values in the expvar map must be numbers or bools again
// as above.
//
// For descriptors with more than one variable label, the expvar must be a
// nested expvar map, i.e. where the values of the topmost map are maps again
// etc. until a depth is reached that corresponds to the number of labels. The
// leaves of that structure must be numbers or bools as above to serve as the
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
// Deprecated: Use collectors.NewExpvarCollector instead.
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
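A hedged sketch of the exports map described above, using the non-deprecated constructor from the collectors package (key and metric names are illustrative):

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewExpvarCollector(map[string]*prometheus.Desc{
	// The expvar key "queue_depth" must hold a number (or bool); it is
	// exported as an untyped Prometheus metric under the given name.
	"queue_depth": prometheus.NewDesc(
		"expvar_queue_depth",
		"Queue depth, proxied from expvar.",
		nil, nil,
	),
}))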


@@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, out)
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -132,7 +132,7 @@ func (g *gauge) Write(out *dto.Metric) error {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
*metricVec
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@@ -145,11 +145,11 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}),
@@ -157,7 +157,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
}
// GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created.
//
// It is possible to call this method without using the returned Gauge to only
@@ -172,7 +172,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -180,7 +180,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
@@ -188,19 +188,19 @@ func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
}
// GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
@@ -244,7 +244,7 @@ func (v *GaugeVec) With(labels Labels) Gauge {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &GaugeVec{vec}, err
}
@@ -273,9 +273,12 @@ type GaugeFunc interface {
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
// value reported is determined by calling the given function from within the
// Write method. Take into account that metric collection may happen
// concurrently. If that results in concurrent calls to Write, like in the case
// where a GaugeFunc is directly registered with Prometheus, the provided
// function must be concurrency-safe.
// concurrently. Therefore, it must be safe to call the provided function
// concurrently.
//
// NewGaugeFunc is a good way to create an “info” style metric with a constant
// value of 1. Example:
// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
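A small sketch of the “info”-style pattern mentioned above (label values are illustrative):

buildInfo := prometheus.NewGaugeFunc(
	prometheus.GaugeOpts{
		Name:        "example_build_info",
		Help:        "Build information; the value is a constant 1.",
		ConstLabels: prometheus.Labels{"version": "v1.2.3", "revision": "abc123"},
	},
	func() float64 { return 1 },
)
prometheus.MustRegister(buildInfo)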


@@ -16,53 +16,209 @@ package prometheus
import (
"runtime"
"runtime/debug"
"sync"
"time"
)
type goCollector struct {
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
}
}
type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
gcLastTimeDesc *Desc
goInfoDesc *Desc
// ms... are memstats related.
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector returns a collector that exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This requires “stopping the world”, which usually only happens
// during garbage collection (GC). Take the following implications into account
// when deciding whether to use the Go collector:
//
// 1. The performance impact of stopping the world becomes more relevant the
// more frequently metrics are collected. With Go1.9 or later, the
// stop-the-world time per metrics collection is very short (~25µs), so the
// performance impact only matters in rare cases. With older Go versions,
// however, the stop-the-world duration depends on the heap size and can be
// quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
//
// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
// metrics collection happens to coincide with GC, it will only complete after
// GC has finished. Usually, GC is fast enough to not cause problems. However,
// with a very large heap, GC might take multiple seconds, which is enough to
// cause scrape timeouts in common setups. To avoid this problem, the Go
// collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
// issue.)
func NewGoCollector() Collector {
return &goCollector{
func newBaseGoCollector() baseGoCollector {
return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
@@ -73,245 +229,30 @@ func NewGoCollector() Collector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
},
}
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()
func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -326,71 +267,19 @@ func (c *goCollector) Collect(ch chan<- Metric) {
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
timer := time.NewTimer(c.msMaxWait)
select {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
}
// memStatsMetrics provide description, value, and value type for memstat metrics.
// memStatsMetrics provide description, evaluator, runtime/metrics name, and
// value type for memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
// NewBuildInfoCollector returns a collector collecting a single metric
// "go_build_info" with the constant value 1 and three labels "path", "version",
// and "checksum". Their label values contain the main module path, version, and
// checksum, respectively. The labels will only have meaningful values if the
// binary is built with Go module support and from source code retrieved from
// the source repository (rather than the local file system). This is usually
// accomplished by building from outside of GOPATH, specifying the full address
// of the main package, e.g. "GO111MODULE=on go run
// github.com/prometheus/client_golang/examples/random". If built without Go
// module support, all label values will be "unknown". If built with Go module
// support but using the source code from the local file system, the "path" will
// be set appropriately, but "checksum" will be empty and "version" will be
// "(devel)".
//
// This collector uses only the build information for the main module. See
// https://github.com/povilasv/prommod for an example of a collector for the
// module dependencies.
func NewBuildInfoCollector() Collector {
path, version, sum := readBuildInfo()
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}


@@ -0,0 +1,107 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.17
// +build !go1.17
package prometheus
import (
"runtime"
"sync"
"time"
)
type goCollector struct {
base baseGoCollector
// ms... are memstats related.
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: goRuntimeMemStats(),
}
}
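As with the other deprecated constructors, a minimal usage sketch of the replacement from the collectors package:

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewGoCollector())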
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()
// Collect base non-memory metrics.
c.base.Collect(ch)
timer := time.NewTimer(c.msMaxWait)
select {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}


@@ -0,0 +1,408 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package prometheus
import (
"math"
"runtime"
"runtime/metrics"
"strings"
"sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
)
type goCollector struct {
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
// snapshot is always produced by Collect.
mu sync.Mutex
// rm... fields all pertain to the runtime/metrics package.
rmSampleBuf []metrics.Sample
rmSampleMap map[string]*metrics.Sample
rmMetrics []collectorMetric
// With Go 1.17, the runtime/metrics package was introduced.
// From that point on, metric names in this package could be generated
// from runtime/metrics names. However, these differ from the old names
// for the same values.
//
// This field exists to export the same values under the old names
// as well.
msMetrics memStatsMetrics
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
for _, d := range descriptions {
if d.Kind == metrics.KindFloat64Histogram {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
}
// Generate a Desc and ValueType for each runtime/metrics metric.
metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
d := &descriptions[i]
namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
// to fail here. This condition is tested elsewhere.
continue
}
// Set up sample buffer for reading, and a map
// for quick lookup of sample values.
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
var m collectorMetric
if d.Kind == metrics.KindFloat64Histogram {
_, hasSum := rmExactSumMap[d.Name]
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
d.Description,
nil,
nil,
),
internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
hasSum,
)
} else if d.Cumulative {
m = NewCounter(CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description,
})
} else {
m = NewGauge(GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description,
})
}
metricSet = append(metricSet, m)
}
return &goCollector{
base: newBaseGoCollector(),
rmSampleBuf: sampleBuf,
rmSampleMap: sampleMap,
rmMetrics: metricSet,
msMetrics: goRuntimeMemStats(),
}
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
for _, i := range c.msMetrics {
ch <- i.desc
}
for _, m := range c.rmMetrics {
ch <- m.Desc()
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
// Collect base non-memory metrics.
c.base.Collect(ch)
// Collect must be thread-safe, so prevent concurrent use of
// rmSampleBuf. Just read into rmSampleBuf but write all the data
// we get into our Metrics or MemStats.
//
// This lock also ensures that the Metrics we send out are all from
// the same updates, ensuring their mutual consistency insofar as
// is guaranteed by the runtime/metrics package.
//
// N.B. This locking is heavy-handed, but Collect is expected to be called
// relatively infrequently. Also the core operation here, metrics.Read,
// is fast (O(tens of microseconds)) so contention should certainly be
// low, though channel operations and any allocations may add to that.
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
}
}
// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
switch v.Kind() {
case metrics.KindUint64:
return float64(v.Uint64())
case metrics.KindFloat64:
return v.Float64()
case metrics.KindBad:
// Unsupported metric.
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
panic("unexpected unsupported metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
panic("unexpected unsupported metric kind")
}
}
var rmExactSumMap = map[string]string{
"/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
"/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
}
// exactSumFor takes a runtime/metrics metric name (that is assumed to
// be of kind KindFloat64Histogram) and returns its exact sum and whether
// its exact sum exists.
//
// The runtime/metrics API for histograms doesn't currently expose exact
// sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
sumName, ok := rmExactSumMap[rmName]
if !ok {
return 0
}
s, ok := c.rmSampleMap[sumName]
if !ok {
return 0
}
return unwrapScalarRMValue(s.Value)
}
func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
lookupOrZero := func(name string) uint64 {
if s, ok := rm[name]; ok {
return s.Value.Uint64()
}
return 0
}
// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
// The reason for this is that MemStats couldn't be extended at the time
// but there was a desire to have Mallocs at least be a little more representative,
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
ms.Sys = lookupOrZero("/memory/classes/total:bytes")
ms.Lookups = 0 // Already always zero.
ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
ms.Alloc = ms.HeapAlloc
ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
// N.B. LastGC is omitted because runtime.GCStats already has this.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.LastGC = 0
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
// of the process.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.GCCPUFraction = 0
}
// batchHistogram is a mutable histogram that is updated
// in batches.
type batchHistogram struct {
selfCollector
// Static fields updated only once.
desc *Desc
hasSum bool
// Because this histogram operates in batches, it just uses a
// single mutex for everything. updates are always serialized
// but Write calls may operate concurrently with updates.
// Contention between these two sources should be rare.
mu sync.Mutex
buckets []float64 // Inclusive lower bounds, like runtime/metrics.
counts []uint64
sum float64 // Used if hasSum is true.
}
// newBatchHistogram creates a new batch histogram value with the given
// Desc, buckets, and whether or not it has an exact sum available.
//
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
h := &batchHistogram{
desc: desc,
buckets: buckets,
// Because buckets follows runtime/metrics conventions, there's
// 1 more value in the buckets list than there are buckets represented,
// because in runtime/metrics, the bucket values represent *boundaries*,
// and non-Inf boundaries are inclusive lower bounds for that bucket.
counts: make([]uint64, len(buckets)-1),
hasSum: hasSum,
}
h.init(h)
return h
}
// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
counts, buckets := his.Counts, his.Buckets
h.mu.Lock()
defer h.mu.Unlock()
// Clear buckets.
for i := range h.counts {
h.counts[i] = 0
}
// Copy and reduce buckets.
var j int
for i, count := range counts {
h.counts[j] += count
if buckets[i+1] == h.buckets[j+1] {
j++
}
}
if h.hasSum {
h.sum = sum
}
}
func (h *batchHistogram) Desc() *Desc {
return h.desc
}
func (h *batchHistogram) Write(out *dto.Metric) error {
h.mu.Lock()
defer h.mu.Unlock()
sum := float64(0)
if h.hasSum {
sum = h.sum
}
dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
totalCount := uint64(0)
for i, count := range h.counts {
totalCount += count
if !h.hasSum {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
}
// Skip the +Inf bucket, but only for the bucket list.
// It must still count for sum and totalCount.
if math.IsInf(h.buckets[i+1], 1) {
break
}
// Float64Histogram's upper bound is exclusive, so make it inclusive
// by obtaining the next float64 value down, in order.
upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
dtoBuckets = append(dtoBuckets, &dto.Bucket{
CumulativeCount: proto.Uint64(totalCount),
UpperBound: proto.Float64(upperBound),
})
}
out.Histogram = &dto.Histogram{
Bucket: dtoBuckets,
SampleCount: proto.Uint64(totalCount),
SampleSum: proto.Float64(sum),
}
return nil
}


@@ -20,7 +20,9 @@ import (
"sort"
"sync"
"sync/atomic"
"time"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@@ -45,7 +47,12 @@ type Histogram interface {
Metric
Collector
// Observe adds a single observation to the histogram.
// Observe adds a single observation to the histogram. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
// counter resets in the sum of observations. See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64)
}
@@ -109,6 +116,34 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
return buckets
}
// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
func ExponentialBucketsRange(min, max float64, count int) []float64 {
if count < 1 {
panic("ExponentialBucketsRange count needs a positive count")
}
if min <= 0 {
panic("ExponentialBucketsRange min needs to be greater than 0")
}
// Formula for exponential buckets.
// max = min*growthFactor^(bucketCount-1)
// We know max/min and highest bucket. Solve for growthFactor.
growthFactor := math.Pow(max/min, 1.0/float64(count-1))
// Now that we know growthFactor, solve for each bucket.
buckets := make([]float64, count)
for i := 1; i <= count; i++ {
buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
}
return buckets
}
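
As a quick illustration of the formula above, a hedged usage sketch (the metric name is invented): ExponentialBucketsRange(1, 100, 5) yields a growth factor of (100/1)^(1/4) ≈ 3.1623 and therefore buckets of roughly [1, 3.16, 10, 31.6, 100]:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Five buckets spaced evenly on a log scale between 1 and 100.
	buckets := prometheus.ExponentialBucketsRange(1, 100, 5)
	fmt.Println(buckets) // approximately [1 3.1623 10 31.623 100]

	// Typical use: the Buckets field of HistogramOpts.
	_ = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds",
		Help:    "Request duration in seconds.",
		Buckets: buckets,
	})
}
```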
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
@ -138,7 +173,7 @@ type HistogramOpts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
@ -151,6 +186,10 @@ type HistogramOpts struct {
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
// The returned implementation also implements ExemplarObserver. It is safe to
// perform the corresponding type assertion. Exemplars are tracked separately
// for each bucket.
func NewHistogram(opts HistogramOpts) Histogram {
return newHistogram(
NewDesc(
@ -186,8 +225,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}},
now: time.Now,
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@ -205,9 +245,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts:
// for both counts as well as exemplars:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
return h
@ -254,6 +295,9 @@ type histogram struct {
upperBounds []float64
labelPairs []*dto.LabelPair
exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (h *histogram) Desc() *Desc {
@ -261,36 +305,13 @@ func (h *histogram) Desc() *Desc {
}
func (h *histogram) Observe(v float64) {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
h.observe(v, h.findBucket(v))
}
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
h.updateExemplar(v, i, e)
}
func (h *histogram) Write(out *dto.Metric) error {
@ -329,6 +350,18 @@ func (h *histogram) Write(out *dto.Metric) error {
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
if e := h.exemplars[i].Load(); e != nil {
his.Bucket[i].Exemplar = e.(*dto.Exemplar)
}
}
// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
b := &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e.(*dto.Exemplar),
}
his.Bucket = append(his.Bucket, b)
}
out.Histogram = his
@ -352,13 +385,64 @@ func (h *histogram) Write(out *dto.Metric) error {
return nil
}
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
return sort.SearchFloat64s(h.upperBounds, v)
}
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if bucket < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[bucket], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
}
// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, h.now(), l)
if err != nil {
panic(err)
}
h.exemplars[bucket].Store(e)
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
*metricVec
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@ -371,14 +455,14 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
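
A short usage sketch for HistogramVec (metric and label names are invented for illustration); note that WithLabelValues takes values in the same order as the label names, while With accepts an order-independent Labels map:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds",
		Help:    "Request latency, partitioned by method and status code.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method", "code"})
	prometheus.MustRegister(latency)

	// Values in the same order as the label names above.
	latency.WithLabelValues("get", "200").Observe(0.042)
	// Equivalent, but order-independent.
	latency.With(prometheus.Labels{"method": "post", "code": "500"}).Observe(1.3)
}
```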
// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
@ -393,7 +477,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -402,7 +486,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
@ -410,19 +494,19 @@ func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error)
}
// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
@ -466,7 +550,7 @@ func (v *HistogramVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &HistogramVec{vec}, err
}
@ -551,12 +635,12 @@ func NewConstHistogram(
count: count,
sum: sum,
buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstMetric would have returned an error.
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
desc *Desc,
count uint64,

View file

@ -0,0 +1,142 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package internal
import (
"math"
"path"
"runtime/metrics"
"strings"
"github.com/prometheus/common/model"
)
// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
// metric description and validates whether the metric is suitable for integration
// with Prometheus.
//
// Returns false if a name could not be produced, or if Prometheus does not understand
// the runtime/metrics Kind.
//
// Note that the main reason a name couldn't be produced is if the runtime/metrics
// package exports a name with characters outside the valid Prometheus metric name
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
namespace := "go"
comp := strings.SplitN(d.Name, ":", 2)
key := comp[0]
unit := comp[1]
// The last path element in the key is the name,
// the rest is the subsystem.
subsystem := path.Dir(key[1:] /* remove leading / */)
name := path.Base(key)
// subsystem is translated by replacing all / and - with _.
subsystem = strings.ReplaceAll(subsystem, "/", "_")
subsystem = strings.ReplaceAll(subsystem, "-", "_")
// unit is translated assuming that the unit contains no
// non-ASCII characters.
unit = strings.ReplaceAll(unit, "-", "_")
unit = strings.ReplaceAll(unit, "*", "_")
unit = strings.ReplaceAll(unit, "/", "_per_")
// name has - replaced with _ and is concatenated with the unit and
// other data.
name = strings.ReplaceAll(name, "-", "_")
name = name + "_" + unit
if d.Cumulative {
name = name + "_total"
}
valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:
case metrics.KindFloat64Histogram:
default:
valid = false
}
return namespace, subsystem, name, valid
}
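
For a concrete feel of the translation rules, here is a simplified standalone sketch that walks one real runtime/metrics name through the same steps (it skips the '-' replacements and the final validity check that RuntimeMetricsToProm performs):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	d := "/gc/heap/allocs:bytes" // a cumulative runtime/metrics counter
	comp := strings.SplitN(d, ":", 2)
	key, unit := comp[0], comp[1]

	// "gc/heap" -> "gc_heap"
	subsystem := strings.ReplaceAll(path.Dir(key[1:]), "/", "_")
	// "allocs" + "_bytes" + "_total" (cumulative metric)
	name := path.Base(key) + "_" + unit + "_total"

	fmt.Println("go_" + subsystem + "_" + name) // go_gc_heap_allocs_bytes_total
}
```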
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
// as the bottom-most upper-bound inclusive bucket in Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Rebucket as powers of 2.
return rebucketExp(buckets, 2)
case "seconds":
// Rebucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := rebucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
}
b[i] = math.Inf(1)
b = b[:i+1]
break
}
return b
}
return buckets
}
// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func rebucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
// since we risk producing NaNs otherwise.
//
// We need to preserve -Inf values to maintain runtime/metrics
// conventions. We'll strip it out later.
if bucket == math.Inf(-1) {
newBuckets = append(newBuckets, bucket)
buckets = buckets[1:]
bucket = buckets[0]
}
// From now on, bucket should always have a non-Inf value because
// Infs are only ever at the ends of the bucket lists, so
// arithmetic operations on it are non-NaN.
for i := 1; i < len(buckets); i++ {
if bucket >= 0 && buckets[i] < bucket*base {
// The next bucket we want to include is at least bucket*base.
continue
} else if bucket < 0 && buckets[i] < bucket/base {
// In this case the bucket we're targeting is negative, and since
// we're ascending through buckets here, we need to divide to get
// closer to zero exponentially.
continue
}
// The +Inf bucket will always be the last one, and we'll always
// end up including it here: the +Inf boundary never satisfies either
// skip condition above, and the final append below picks up +Inf itself.
newBuckets = append(newBuckets, bucket)
bucket = buckets[i]
}
return append(newBuckets, bucket)
}
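
To see the downsampling at work, here is a simplified copy of rebucketExp restricted to non-negative boundaries (the vendored version additionally handles -Inf and negative buckets), run on an invented input:

```go
package main

import "fmt"

func rebucketExp(buckets []float64, base float64) []float64 {
	bucket := buckets[0]
	var out []float64
	for i := 1; i < len(buckets); i++ {
		// Skip boundaries until the next one is at least bucket*base.
		if buckets[i] < bucket*base {
			continue
		}
		out = append(out, bucket)
		bucket = buckets[i]
	}
	return append(out, bucket)
}

func main() {
	in := []float64{1, 1.5, 2, 3, 4, 6, 8, 12, 16}
	fmt.Println(rebucketExp(in, 2)) // [1 2 4 8 16]
}
```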

View file

@ -17,12 +17,14 @@ import (
"strings"
"time"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
const separatorByte byte = 255
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
@ -56,7 +58,7 @@ type Metric interface {
}
// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just be
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name to a non-empty string. All other fields are
@ -87,7 +89,7 @@ type Opts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
}

View file

@ -50,3 +50,15 @@ type ObserverVec interface {
Collector
}
// ExemplarObserver is implemented by Observers that offer the option of
// observing a value together with an exemplar. Its ObserveWithExemplar method
// works like the Observe method of an Observer but also replaces the currently
// saved exemplar (if any) with a new one, created from the provided value, the
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
// invalid or if the provided labels contain more than 64 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}
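
Since the histogram implementation in this package documents that it also satisfies ExemplarObserver, the interface can be used via a type assertion; a minimal sketch (the metric name and exemplar labels are invented):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "example_request_duration_seconds",
		Help: "Request duration in seconds.",
	})
	prometheus.MustRegister(hist)

	// Safe per the NewHistogram documentation; the exemplar is stored
	// in the bucket the observed value falls into.
	hist.(prometheus.ExemplarObserver).ObserveWithExemplar(
		0.42, prometheus.Labels{"trace_id": "abc123"},
	)
}
```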

View file

@ -15,7 +15,11 @@ package prometheus
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
type processCollector struct {
@ -50,16 +54,10 @@ type ProcessCollectorOpts struct {
ReportErrors bool
}
// NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as
// the process start time. The detailed behavior is defined by the provided
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
// collector for the current process with an empty namespace string and no error
// reporting.
// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
// See there for documentation.
//
// The collector only works on operating systems with a Linux-style proc
// filesystem and on Microsoft Windows. On other operating systems, it will not
// collect any metrics.
// Deprecated: Use collectors.NewProcessCollector instead.
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := ""
if len(opts.Namespace) > 0 {
@ -149,3 +147,20 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
}
ch <- NewInvalidMetric(desc, err)
}
// NewPidFileFn returns a function that retrieves a pid from the specified file.
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
content, err := ioutil.ReadFile(pidFilePath)
if err != nil {
return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
}
return pid, nil
}
}
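
A hedged usage sketch for NewPidFileFn (the pid file path is invented), following the deprecation note above by constructing the collector through the collectors package:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	// Collect process metrics for whatever pid the file names,
	// rather than for the current process.
	prometheus.MustRegister(collectors.NewProcessCollector(
		prometheus.ProcessCollectorOpts{
			PidFn: prometheus.NewPidFileFn("/var/run/example.pid"),
		},
	))
}
```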

View file

@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package prometheus

View file

@ -33,18 +33,22 @@ var (
)
type processMemoryCounters struct {
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
// System interface description
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
// Refer to the Golang internal implementation
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
_ uint32
PageFaultCount uint32
PeakWorkingSetSize uint64
WorkingSetSize uint64
QuotaPeakPagedPoolUsage uint64
QuotaPagedPoolUsage uint64
QuotaPeakNonPagedPoolUsage uint64
QuotaNonPagedPoolUsage uint64
PagefileUsage uint64
PeakPagefileUsage uint64
PrivateUsage uint64
PeakWorkingSetSize uintptr
WorkingSetSize uintptr
QuotaPeakPagedPoolUsage uintptr
QuotaPagedPoolUsage uintptr
QuotaPeakNonPagedPoolUsage uintptr
QuotaNonPagedPoolUsage uintptr
PagefileUsage uintptr
PeakPagefileUsage uintptr
PrivateUsage uintptr
}
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {

View file

@ -53,15 +53,21 @@ func (r *responseWriterDelegator) Written() int64 {
}
func (r *responseWriterDelegator) WriteHeader(code int) {
if r.observeWriteHeader != nil && !r.wroteHeader {
// Only call observeWriteHeader for the 1st time. It's a bug if
// WriteHeader is called more than once, but we want to protect
// against it here. Note that we still delegate the WriteHeader
// to the original ResponseWriter to not mask the bug from it.
r.observeWriteHeader(code)
}
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
if r.observeWriteHeader != nil {
r.observeWriteHeader(code)
}
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
@ -77,17 +83,23 @@ type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
//remove support from client_golang yet.
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
d.ResponseWriter.(http.Flusher).Flush()
}
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
@ -335,8 +347,7 @@ func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) deleg
}
id := 0
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
//remove support from client_golang yet.
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}

View file

@ -99,7 +99,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}
if opts.Registry != nil {
// Initialize all possibilites that can occur below.
// Initialize all possibilities that can occur below.
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
}
}
contentType := expfmt.Negotiate(req.Header)
var contentType expfmt.Format
if opts.EnableOpenMetrics {
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
} else {
contentType = expfmt.Negotiate(req.Header)
}
header := rsp.Header()
header.Set(contentTypeHeader, string(contentType))
@ -162,28 +167,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
enc := expfmt.NewEncoder(w, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
httpError(rsp, err)
return
}
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
handleError := func(err error) bool {
if err == nil {
return false
}
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case HTTPErrorOnError:
// We cannot really send an HTTP error at this
// point because we most likely have written
// something to rsp already. But at least we can
// stop sending.
return true
}
// Do nothing in all other cases, including ContinueOnError.
return false
}
if lastErr != nil {
httpError(rsp, lastErr)
for _, mf := range mfs {
if handleError(enc.Encode(mf)) {
return
}
}
if closer, ok := enc.(expfmt.Closer); ok {
// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
if handleError(closer.Close()) {
return
}
}
})
@ -255,7 +272,12 @@ type HandlerErrorHandling int
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body.
// encountered. Report the error message in the body. Note that HTTP
// errors cannot be served anymore once the beginning of a regular
// payload has been sent. Thus, in the (unlikely) case that encoding the
// payload into the negotiated wire format fails, serving the response
// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
// those errors.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
@ -281,8 +303,12 @@ type Logger interface {
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and
// serving metrics. If nil, errors are not logged at all.
// ErrorLog specifies an optional Logger for errors collecting and
// serving metrics. If nil, errors are not logged at all. Note that the
// type of a reported error is often prometheus.MultiError, which
// formats into a multi-line error string. If you want to avoid the
// latter, create a Logger implementation that detects a
// prometheus.MultiError and formats the contained errors into one line.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling provided ErrorLog
@ -318,6 +344,16 @@ type HandlerOpts struct {
// away). Until the implementation is improved, it is recommended to
// implement a separate timeout in potentially slow Collectors.
Timeout time.Duration
// If true, the experimental OpenMetrics encoding is added to the
// possible options during content negotiation. Note that Prometheus
// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
// the only way to transmit exemplars. However, the move to OpenMetrics
// is not completely transparent. Most notably, the values of "quantile"
// labels of Summaries and "le" labels of Histograms are formatted with
// a trailing ".0" if they would otherwise look like integer numbers
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
}
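
Putting HandlerOpts together, a minimal server sketch (address and endpoint are arbitrary) that opts into OpenMetrics negotiation so exemplars can be transmitted:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	http.Handle("/metrics", promhttp.HandlerFor(
		prometheus.DefaultGatherer,
		promhttp.HandlerOpts{
			// OpenMetrics is the only way to expose exemplars; see the
			// caveat about ".0"-suffixed label values above.
			EnableOpenMetrics: true,
		},
	))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```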
// gzipAccepted returns whether the client will accept gzip-encoded content.
@ -334,11 +370,9 @@ func gzipAccepted(header http.Header) bool {
}
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerErrer. Error contents is
// supposed to be uncompressed plain text. However, same as with a plain
// http.Error, any header settings will be void if the header has already been
// sent. The error message will still be written to the writer, but it will
// probably be of limited use.
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
// must not be called if the header or any payload has already been sent.
func httpError(rsp http.ResponseWriter, err error) {
rsp.Header().Del(contentEncodingHeader)
http.Error(

View file

@ -49,7 +49,10 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
// panics otherwise. For the "method" label a predefined default label value set
// is used to filter given values. Values besides predefined values will count
// as `unknown` method. `WithExtraMethods` can be used to add more
// methods to the set. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
@ -57,13 +60,18 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// is not incremented.
//
// See ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
rtOpts := &option{}
for _, o := range opts {
o(rtOpts)
}
code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
}
return resp, err
})
@ -73,7 +81,10 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer
// "method". The function panics otherwise. For the "method" label a predefined
// default label value set is used to filter given values. Values besides
// predefined values will count as `unknown` method. `WithExtraMethods`
// can be used to add more methods to the set. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
@ -85,14 +96,19 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
rtOpts := &option{}
for _, o := range opts {
o(rtOpts)
}
code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
}
return resp, err
})
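
A client-side sketch tying the round-tripper helpers together (the metric name is invented), using the counter variant: the instrumented transport counts each completed request by status code and method:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Only "code" and "method" are allowed as non-const labels.
	reqs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_client_requests_total",
		Help: "Outgoing HTTP requests by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(reqs)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(reqs, http.DefaultTransport),
	}
	if resp, err := client.Get("https://example.com"); err == nil {
		resp.Body.Close() // counted as code="200", method="get"
	}
}
```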

View file

@ -43,14 +43,17 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
// The ObserverVec must have valid metric and label names and must have zero,
// one, or two non-const non-curried labels. For those, the only allowed label
// names are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -58,7 +61,12 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
if code {
@ -67,57 +75,70 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec
// must have zero, one, or two non-const non-curried labels. For those, the only
// allowed label names are "code" and "method". The function panics
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
// HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
// to observe the request result with the provided CounterVec. The CounterVec
// must have valid metric and label names and must have zero, one, or two
// non-const non-curried labels. For those, the only allowed label names are
// "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(counter)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
counter.With(labels(code, method, r.Method, d.Status())).Inc()
counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
counter.With(labels(code, method, r.Method, 0)).Inc()
counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
})
}
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
// until the response headers are written. The ObserverVec must have valid
// metric and label names and must have zero, one, or two non-const non-curried
// labels. For those, the only allowed label names are "code" and "method". The
// function panics otherwise. For the "method" label a predefined default label
// value set is used to filter given values. Values besides predefined values
// will count as `unknown` method. `WithExtraMethods` can be used to add more
// methods to the set. The Observe method of the Observer in the
// ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
// use an ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
@ -126,35 +147,48 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
next.ServeHTTP(d, r)
})
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
if code {
@ -162,42 +196,56 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
})
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the response size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
})
}
// checkLabels returns whether the provided Collector has a non-const,
// non-curried label named "code" and/or "method". It panics if the provided
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code bool, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
@ -225,6 +273,10 @@ func checkLabels(c prometheus.Collector) (code bool, method bool) {
close(descc)
// Make sure the Collector has a valid Desc by registering it with a
// temporary registry.
prometheus.NewRegistry().MustRegister(c)
// Create a ConstMetric with the Desc. Since we don't know how many
// variable labels there are, try for as long as it needs.
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
@ -279,7 +331,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
@ -289,7 +341,7 @@ func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
labels["code"] = sanitizeCode(status)
}
if method {
labels["method"] = sanitizeMethod(reqMethod)
labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
}
return labels
@ -319,7 +371,12 @@ func computeApproximateRequestSize(r *http.Request) int {
return s
}
func sanitizeMethod(m string) string {
// If the provided method is in the known method list, it is sanitized and
// returned in lower case. Otherwise, "unknown" is returned. The known method
// list can be extended as needed by using the extraMethods parameter.
func sanitizeMethod(m string, extraMethods ...string) string {
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
// the methods chosen as default.
switch m {
case "GET", "get":
return "get"
@ -337,15 +394,25 @@ func sanitizeMethod(m string) string {
return "options"
case "NOTIFY", "notify":
return "notify"
case "TRACE", "trace":
return "trace"
case "PATCH", "patch":
return "patch"
default:
return strings.ToLower(m)
for _, method := range extraMethods {
if strings.EqualFold(m, method) {
return strings.ToLower(m)
}
}
return "unknown"
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, santizeCode will return 200, for consistency with behavior in
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
switch s {
case 100:
return "100"
@ -442,6 +509,9 @@ func sanitizeCode(s int) string {
return "511"
default:
return strconv.Itoa(s)
if s >= 100 && s <= 599 {
return strconv.Itoa(s)
}
return "unknown"
}
}

View file

@ -0,0 +1,31 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
// Option is used to configure a middleware or a round tripper.
type Option func(*option)
type option struct {
extraMethods []string
}
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
return func(o *option) {
o.extraMethods = methods
}
}
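
A server-side sketch of the option in use (the metric name and the PURGE method are illustrative): without the option, a request whose method is outside the default set is counted as method="unknown":

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Handled HTTP requests by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(reqs)

	next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// PURGE is not in the default method set; the option makes it count
	// as method="purge" instead of method="unknown".
	http.Handle("/", promhttp.InstrumentHandlerCounter(
		reqs, next, promhttp.WithExtraMethods("PURGE"),
	))
}
```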

View file

@ -25,6 +25,8 @@ import (
"sync"
"unicode/utf8"
"github.com/cespare/xxhash/v2"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
@ -74,7 +76,7 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry. Unchecked Collectors (those whose
// Describe methed does not yield any descriptors) are excluded from the check.
// Describe method does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@ -213,6 +215,8 @@ func (err AlreadyRegisteredError) Error() string {
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error
// Error formats the contained errors as a bullet point list, preceded by the
// total number of errors. Note that this results in a multi-line string.
func (errs MultiError) Error() string {
if len(errs) == 0 {
return ""
@ -266,7 +270,7 @@ func (r *Registry) Register(c Collector) error {
descChan = make(chan *Desc, capDescChan)
newDescIDs = map[uint64]struct{}{}
newDimHashesByName = map[string]uint64{}
collectorID uint64 // Just a sum of all desc IDs.
collectorID uint64 // All desc IDs XOR'd together.
duplicateDescErr error
)
go func() {
@ -293,12 +297,12 @@ func (r *Registry) Register(c Collector) error {
if _, exists := r.descIDs[desc.id]; exists {
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
}
// If it is not a duplicate desc in this collector, add it to
// If it is not a duplicate desc in this collector, XOR it to
// the collectorID. (We allow duplicate descs within the same
// collector, but their existence must be a no-op.)
if _, exists := newDescIDs[desc.id]; !exists {
newDescIDs[desc.id] = struct{}{}
collectorID += desc.id
collectorID ^= desc.id
}
// Are all the label names and the help string consistent with
@ -360,7 +364,7 @@ func (r *Registry) Unregister(c Collector) bool {
var (
descChan = make(chan *Desc, capDescChan)
descIDs = map[uint64]struct{}{}
collectorID uint64 // Just a sum of the desc IDs.
collectorID uint64 // All desc IDs XOR'd together.
)
go func() {
c.Describe(descChan)
@ -368,7 +372,7 @@ func (r *Registry) Unregister(c Collector) bool {
}()
for desc := range descChan {
if _, exists := descIDs[desc.id]; !exists {
collectorID += desc.id
collectorID ^= desc.id
descIDs[desc.id] = struct{}{}
}
}
@ -875,9 +879,9 @@ func checkMetricConsistency(
}
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
h = hashAdd(h, name)
h = hashAddByte(h, separatorByte)
h := xxhash.New()
h.WriteString(name)
h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
@ -888,18 +892,19 @@ func checkMetricConsistency(
dtoMetric.Label = copiedLabels
}
for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetName())
h = hashAddByte(h, separatorByte)
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
h.WriteString(lp.GetName())
h.Write(separatorByteSlice)
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
if _, exists := metricHashes[h]; exists {
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
"collected metric %q { %s} was collected before with the same name and label values",
name, dtoMetric,
)
}
metricHashes[h] = struct{}{}
metricHashes[hSum] = struct{}{}
return nil
}

View file

@ -23,6 +23,7 @@ import (
"time"
"github.com/beorn7/perks/quantile"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@ -54,7 +55,12 @@ type Summary interface {
Metric
Collector
// Observe adds a single observation to the summary.
// Observe adds a single observation to the summary. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
// counter resets in the sum of observations. See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64)
}
@ -109,7 +115,7 @@ type SummaryOpts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
@ -120,7 +126,9 @@ type SummaryOpts struct {
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
// for the summary. Must be positive. The default value is DefMaxAge.
// for the summary. Only applies to pre-calculated quantiles; it does not
// apply to _sum and _count. Must be positive. The default value is
// DefMaxAge.
MaxAge time.Duration
// AgeBuckets is the number of buckets used to exclude observations that
@ -207,8 +215,8 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
desc: desc,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
return s
@ -220,7 +228,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
hotBuf: make([]float64, 0, opts.BufCap),
coldBuf: make([]float64, 0, opts.BufCap),
@ -512,7 +520,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
*metricVec
*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@ -534,14 +542,14 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
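
A brief usage sketch for SummaryVec (names and objective values are invented); recall from SummaryOpts above that MaxAge bounds only the quantile estimates, not _sum and _count:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	lat := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "example_rpc_duration_seconds",
		Help: "RPC latency.",
		// Quantile rank -> allowed absolute error.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     5 * time.Minute,
	}, []string{"service"})
	prometheus.MustRegister(lat)

	lat.WithLabelValues("auth").Observe(0.007)
}
```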
// GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
@ -556,7 +564,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -565,7 +573,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
@ -573,19 +581,19 @@ func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
}
// GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
@ -629,7 +637,7 @@ func (v *SummaryVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &SummaryVec{vec}, err
}
@ -715,7 +723,7 @@ func NewConstSummary(
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}

View file

@ -16,8 +16,12 @@ package prometheus
import (
"fmt"
"sort"
"time"
"unicode/utf8"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
dto "github.com/prometheus/client_model/go"
)
@ -25,7 +29,8 @@ import (
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
// Possible values for the ValueType enum. Use UntypedValue to mark a metric
// with an unknown type.
const (
_ ValueType = iota
CounterValue
@ -58,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
desc: desc,
valType: valueType,
function: function,
labelPairs: makeLabelPairs(desc, nil),
labelPairs: MakeLabelPairs(desc, nil),
}
result.init(result)
return result
@ -69,7 +74,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, out)
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@ -90,7 +95,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
desc: desc,
valType: valueType,
val: value,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}
@ -116,19 +121,20 @@ func (m *constMetric) Desc() *Desc {
}
func (m *constMetric) Write(out *dto.Metric) error {
return populateMetric(m.valType, m.val, m.labelPairs, out)
return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
}
func populateMetric(
t ValueType,
v float64,
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v)}
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@ -139,7 +145,14 @@ func populateMetric(
return nil
}
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
// variable and constant labels in the provided Desc. The values for the
// variable labels are defined by the labelValues slice, which must be in the
// same order as the corresponding variable labels in the Desc.
//
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
@ -160,3 +173,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
sort.Sort(labelPairSorter(labelPairs))
return labelPairs
}
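Since MakeLabelPairs is now exported for custom Metric implementations, a small sketch of what it produces (the desc and values are made up for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"queue_depth", "Current queue depth.",
		[]string{"queue"}, prometheus.Labels{"shard": "0"},
	)
	// Returns the sorted []*dto.LabelPair a custom Metric's Write method
	// would copy into its dto.Metric.
	pairs := prometheus.MakeLabelPairs(desc, []string{"ingest"})
	for _, lp := range pairs {
		fmt.Println(lp.GetName(), "=", lp.GetValue())
	}
}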
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
const ExemplarMaxRunes = 64
// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
// number of runes in the label names and values exceeds ExemplarMaxRunes.
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
tsProto := timestamppb.New(ts)
if err := tsProto.CheckValid(); err != nil {
return nil, err
}
e.Timestamp = tsProto
labelPairs := make([]*dto.LabelPair, 0, len(l))
var runes int
for name, value := range l {
if !checkLabelName(name) {
return nil, fmt.Errorf("exemplar label name %q is invalid", name)
}
runes += utf8.RuneCountInString(name)
if !utf8.ValidString(value) {
return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
}
runes += utf8.RuneCountInString(value)
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(name),
Value: proto.String(value),
})
}
if runes > ExemplarMaxRunes {
return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
}
e.Label = labelPairs
return e, nil
}
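newExemplar is unexported, but it backs AddWithExemplar on counters; a hedged sketch of the caller-facing side (label name and value are illustrative, and the labels must stay within ExemplarMaxRunes in total):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total",
		Help: "Total requests.",
	})
	prometheus.MustRegister(requests)
	// The type assertion is documented as safe for counters from NewCounter.
	requests.(prometheus.ExemplarAdder).AddWithExemplar(
		1, prometheus.Labels{"trace_id": "abc123"},
	)
}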

View file

@ -20,12 +20,20 @@ import (
"github.com/prometheus/common/model"
)
// metricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying. It uses basicMetricVec internally.
type metricVec struct {
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// used for custom Metric implementations.
//
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
*metricMap
curry []curriedLabelValue
@ -35,9 +43,9 @@ type metricVec struct {
hashAddByte func(h uint64, b byte) uint64
}
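Following the recipe in the doc comment above, a sketch of a custom FooVec; the const-metric factory is a toy stand-in for a real custom Metric implementation:

package custommetrics

import "github.com/prometheus/client_golang/prometheus"

// FooVec embeds *prometheus.MetricVec as the new doc comment recommends.
type FooVec struct {
	*prometheus.MetricVec
}

func NewFooVec(name, help string, labelNames []string) *FooVec {
	desc := prometheus.NewDesc(name, help, labelNames, nil)
	return &FooVec{
		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
			// Toy factory: a real FooVec would construct its own Metric here.
			return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, lvs...)
		}),
	}
}

// WithLabelValues is the usual panicking convenience wrapper.
func (v *FooVec) WithLabelValues(lvs ...string) prometheus.Metric {
	m, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return m
}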
// newMetricVec returns an initialized metricVec.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
return &metricVec{
// NewMetricVec returns an initialized MetricVec.
func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
metricMap: &metricMap{
metrics: map[uint64][]metricWithLabelValues{},
desc: desc,
@ -63,7 +71,7 @@ func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@ -82,7 +90,7 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool {
func (m *MetricVec) Delete(labels Labels) bool {
h, err := m.hashLabels(labels)
if err != nil {
return false
@ -91,7 +99,36 @@ func (m *metricVec) Delete(labels Labels) bool {
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.
// Describe implements Collector.
func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
// Collect implements Collector.
func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
// Reset deletes all metrics in this vector.
func (m *MetricVec) Reset() { m.metricMap.Reset() }
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
var (
newCurry []curriedLabelValue
oldCurry = m.curry
@ -116,7 +153,7 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
}
return &metricVec{
return &MetricVec{
metricMap: m.metricMap,
curry: newCurry,
hashAdd: m.hashAdd,
@ -124,7 +161,34 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
}, nil
}
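A short usage sketch of currying through one of the concrete wrappers (names illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Requests by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(requests) // register only the uncurried vector

	// The curried vector shares its metrics with `requests`.
	gets := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
	gets.WithLabelValues("200").Inc()
}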
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created (by
// calling the newMetric function provided during construction of the
// MetricVec).
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it in its initial state.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
//
// Note that GetMetricWithLabelValues is usually not called directly but through
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@ -133,7 +197,23 @@ func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
//
// Note that GetMetricWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@ -142,7 +222,7 @@ func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
@ -165,7 +245,7 @@ func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
return h, nil
}
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
@ -264,7 +344,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
}
if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else {
delete(m.metrics, h)
}
@ -290,7 +372,9 @@ func (m *metricMap) deleteByHashWithLabels(
}
if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else {
delete(m.metrics, h)
}

View file

@ -17,6 +17,7 @@ import (
"fmt"
"sort"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@ -27,10 +28,13 @@ import (
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
// duplicate any of those labels. Wrapping a nil value is valid, resulting
// in a no-op Registerer.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
// Collectors. It should not be used to add fixed labels to all metrics
// exposed. See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be
@ -50,6 +54,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided prefix to the name of all Metrics it collects.
// Wrapping a nil value is valid, resulting in a no-op Registerer.
//
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
// a sub-system. To make this work, register metrics of the sub-system with the
@ -80,6 +85,9 @@ type wrappingRegisterer struct {
}
func (r *wrappingRegisterer) Register(c Collector) error {
if r.wrappedRegisterer == nil {
return nil
}
return r.wrappedRegisterer.Register(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
@ -88,6 +96,9 @@ func (r *wrappingRegisterer) Register(c Collector) error {
}
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
if r.wrappedRegisterer == nil {
return
}
for _, c := range cs {
if err := r.Register(c); err != nil {
panic(err)
@ -96,6 +107,9 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
}
func (r *wrappingRegisterer) Unregister(c Collector) bool {
if r.wrappedRegisterer == nil {
return false
}
return r.wrappedRegisterer.Unregister(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
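The excerpt above ends mid-hunk; for orientation, a sketch of the wrapped registerers including the new nil behavior (names illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	// Every collector registered through `wrapped` gets component="api".
	wrapped := prometheus.WrapRegistererWith(prometheus.Labels{"component": "api"}, reg)
	jobs := prometheus.NewCounter(prometheus.CounterOpts{Name: "jobs_total", Help: "Jobs."})
	wrapped.MustRegister(jobs)

	// Per this change, wrapping nil yields a no-op Registerer.
	noop := prometheus.WrapRegistererWithPrefix("sub_", nil)
	noop.MustRegister(jobs) // silently does nothing
}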

View file

@ -1,11 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metrics.proto
package io_prometheus_client // import "github.com/prometheus/client_model/go"
package io_prometheus_client
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +19,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MetricType int32
@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{
3: "UNTYPED",
4: "HISTOGRAM",
}
var MetricType_value = map[string]int32{
"COUNTER": 0,
"GAUGE": 1,
@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
*p = x
return p
}
func (x MetricType) String() string {
return proto.EnumName(MetricType_name, int32(x))
}
func (x *MetricType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
if err != nil {
@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
*x = MetricType(value)
return nil
}
func (MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
return fileDescriptor_6039342a2ba47b72, []int{0}
}
type LabelPair struct {
@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
return fileDescriptor_6039342a2ba47b72, []int{0}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelPair.Unmarshal(m, b)
}
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
}
func (dst *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(dst, src)
func (m *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(m, src)
}
func (m *LabelPair) XXX_Size() int {
return xxx_messageInfo_LabelPair.Size(m)
@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
return fileDescriptor_6039342a2ba47b72, []int{1}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Gauge.Unmarshal(m, b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
}
func (dst *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(dst, src)
func (m *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(m, src)
}
func (m *Gauge) XXX_Size() int {
return xxx_messageInfo_Gauge.Size(m)
@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 {
}
type Counter struct {
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
return fileDescriptor_6039342a2ba47b72, []int{2}
}
func (m *Counter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Counter.Unmarshal(m, b)
}
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
}
func (dst *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(dst, src)
func (m *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(m, src)
}
func (m *Counter) XXX_Size() int {
return xxx_messageInfo_Counter.Size(m)
@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 {
return 0
}
func (m *Counter) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
type Quantile struct {
Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
return fileDescriptor_6039342a2ba47b72, []int{3}
}
func (m *Quantile) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Quantile.Unmarshal(m, b)
}
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
}
func (dst *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(dst, src)
func (m *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(m, src)
}
func (m *Quantile) XXX_Size() int {
return xxx_messageInfo_Quantile.Size(m)
@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
return fileDescriptor_6039342a2ba47b72, []int{4}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Summary.Unmarshal(m, b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
}
func (dst *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(dst, src)
func (m *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(m, src)
}
func (m *Summary) XXX_Size() int {
return xxx_messageInfo_Summary.Size(m)
@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
return fileDescriptor_6039342a2ba47b72, []int{5}
}
func (m *Untyped) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Untyped.Unmarshal(m, b)
}
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
}
func (dst *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(dst, src)
func (m *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(m, src)
}
func (m *Untyped) XXX_Size() int {
return xxx_messageInfo_Untyped.Size(m)
@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
return fileDescriptor_6039342a2ba47b72, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Histogram.Unmarshal(m, b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
}
func (dst *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(dst, src)
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return xxx_messageInfo_Histogram.Size(m)
@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket {
}
type Bucket struct {
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
return fileDescriptor_6039342a2ba47b72, []int{7}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
}
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
}
func (dst *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(dst, src)
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
}
func (m *Bucket) XXX_Size() int {
return xxx_messageInfo_Bucket.Size(m)
@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
return 0
}
func (m *Bucket) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
type Exemplar struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_6039342a2ba47b72, []int{8}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Exemplar.Unmarshal(m, b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return xxx_messageInfo_Exemplar.Size(m)
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabel() []*LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
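A hedged sketch of building the new Exemplar message by hand, using the same deprecated protobuf helpers this generated file imports (the values are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	ts, _ := ptypes.TimestampProto(time.Now())
	e := &dto.Exemplar{
		Label: []*dto.LabelPair{{
			Name:  proto.String("trace_id"),
			Value: proto.String("abc123"),
		}},
		Value:     proto.Float64(0.67),
		Timestamp: ts,
	}
	fmt.Println(e.GetValue(), e.GetTimestamp())
}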
type Metric struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
return fileDescriptor_6039342a2ba47b72, []int{9}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Metric.Unmarshal(m, b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
}
func (dst *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(dst, src)
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return xxx_messageInfo_Metric.Size(m)
@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
func (*MetricFamily) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
return fileDescriptor_6039342a2ba47b72, []int{10}
}
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
}
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
}
func (dst *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(dst, src)
func (m *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(m, src)
}
func (m *MetricFamily) XXX_Size() int {
return xxx_messageInfo_MetricFamily.Size(m)
@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric {
}
func init() {
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
@ -580,50 +669,55 @@ func init() {
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
}
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
// 591 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
var fileDescriptor_6039342a2ba47b72 = []byte{
// 665 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
}

View file

@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
}
// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurrs during sample extraction, it continues to
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {

View file

@ -18,7 +18,7 @@ import (
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
@ -30,17 +30,38 @@ type Encoder interface {
Encode(*dto.MetricFamily) error
}
type encoder func(*dto.MetricFamily) error
func (e encoder) Encode(v *dto.MetricFamily) error {
return e(v)
}
// Closer is implemented by Encoders that need to be closed to finalize
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
//
// Note that all Encoder implementations returned from this package implement
// Closer, too, even if the Close call is a no-op. This happens in preparation
// for adding a Close method to the Encoder interface directly in a (mildly
// breaking) release in the future.
type Closer interface {
Close() error
}
// Negotiate returns the Content-Type based on the given Accept header.
// If no appropriate accepted type is found, FmtText is returned.
type encoderCloser struct {
encode func(*dto.MetricFamily) error
close func() error
}
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
return ec.encode(v)
}
func (ec encoderCloser) Close() error {
return ec.close()
}
// Negotiate returns the Content-Type based on the given Accept header. If no
// appropriate accepted type is found, FmtText is returned (which is the
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateOpenMetrics.
func Negotiate(h http.Header) Format {
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
// Check for protocol buffer
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format {
return FmtProtoCompact
}
}
// Check for text format.
ver := ac.Params["version"]
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format {
return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation.
// NegotiateIncludingOpenMetrics works like Negotiate but includes
// FmtOpenMetrics as an option for the result. Note that this function is
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
case "text":
return FmtProtoText
case "compact-text":
return FmtProtoCompact
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
return FmtOpenMetrics
}
}
return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation. All
// Encoder implementations returned by NewEncoder also implement Closer, and
// callers should always call the Close method. It is currently only required
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
func NewEncoder(w io.Writer, format Format) Encoder {
switch format {
case FmtProtoDelim:
return encoder(func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
return err
},
close: func() error { return nil },
}
case FmtProtoCompact:
return encoder(func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
return err
},
close: func() error { return nil },
}
case FmtProtoText:
return encoder(func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
return err
},
close: func() error { return nil },
}
case FmtText:
return encoder(func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
return err
},
close: func() error { return nil },
}
case FmtOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
return err
},
close: func() error {
_, err := FinalizeOpenMetrics(w)
return err
},
}
}
panic("expfmt.NewEncoder: unknown format")
panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
}
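Putting negotiation, encoding, and the new Closer together, a sketch of a scrape-style handler; the fams parameter is assumed to come from a Gatherer:

package handlers

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func writeMetrics(w http.ResponseWriter, r *http.Request, fams []*dto.MetricFamily) {
	format := expfmt.NegotiateIncludingOpenMetrics(r.Header)
	w.Header().Set("Content-Type", string(format))
	enc := expfmt.NewEncoder(w, format)
	for _, mf := range fams {
		if err := enc.Encode(mf); err != nil {
			return
		}
	}
	// Always Close: a no-op except for FmtOpenMetrics, which needs `# EOF`.
	if closer, ok := enc.(expfmt.Closer); ok {
		closer.Close()
	}
}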

View file

@ -19,10 +19,12 @@ type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion = "0.0.1"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
@ -30,6 +32,7 @@ const (
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
)
const (

View file

@ -0,0 +1,527 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expfmt
import (
"bufio"
"bytes"
"fmt"
"io"
"math"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
// the same order as the input, no further sorting is performed. Furthermore,
// this function assumes the input is already sanitized and does not perform any
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
// on individual metric families, it is the responsibility of the caller to
// append this line to 'out' once all metric families have been written.
// Conveniently, this can be done by calling FinalizeOpenMetrics.
//
// The output should be fully OpenMetrics compliant. However, there are a few
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
// - Counters are expected to have the `_total` suffix in their metric name. In
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
// line. A counter with a missing `_total` suffix is not an error. However,
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
// output.
//
// - No support for the following (optional) features: `# UNIT` line, `_created`
// line, info type, stateset type, gaugehistogram type.
//
// - The size of exemplar labels is not checked (i.e. it's possible to create
// exemplars that are larger than allowed by the OpenMetrics specification).
//
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
}
// Try the interface upgrade. If it doesn't work, we'll use a
// bufio.Writer from the sync.Pool.
w, ok := out.(enhancedWriter)
if !ok {
b := bufPool.Get().(*bufio.Writer)
b.Reset(out)
w = b
defer func() {
bErr := b.Flush()
if err == nil {
err = bErr
}
bufPool.Put(b)
}()
}
var (
n int
metricType = in.GetType()
shortName = name
)
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
shortName = name[:len(name)-6]
}
// Comments, first HELP, then TYPE.
if in.Help != nil {
n, err = w.WriteString("# HELP ")
written += n
if err != nil {
return
}
n, err = w.WriteString(shortName)
written += n
if err != nil {
return
}
err = w.WriteByte(' ')
written++
if err != nil {
return
}
n, err = writeEscapedString(w, *in.Help, true)
written += n
if err != nil {
return
}
err = w.WriteByte('\n')
written++
if err != nil {
return
}
}
n, err = w.WriteString("# TYPE ")
written += n
if err != nil {
return
}
n, err = w.WriteString(shortName)
written += n
if err != nil {
return
}
switch metricType {
case dto.MetricType_COUNTER:
if strings.HasSuffix(name, "_total") {
n, err = w.WriteString(" counter\n")
} else {
n, err = w.WriteString(" unknown\n")
}
case dto.MetricType_GAUGE:
n, err = w.WriteString(" gauge\n")
case dto.MetricType_SUMMARY:
n, err = w.WriteString(" summary\n")
case dto.MetricType_UNTYPED:
n, err = w.WriteString(" unknown\n")
case dto.MetricType_HISTOGRAM:
n, err = w.WriteString(" histogram\n")
default:
return written, fmt.Errorf("unknown metric type %s", metricType.String())
}
written += n
if err != nil {
return
}
// Finally the samples, one line for each.
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
"expected counter in metric %s %s", name, metric,
)
}
// Note that we have ensured above that either the name
// ends on `_total` or that the rendered type is
// `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
"expected gauge in metric %s %s", name, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
"expected untyped in metric %s %s", name, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
"expected summary in metric %s %s", name, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
w, name, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
)
written += n
if err != nil {
return
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
written += n
if err != nil {
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
"expected histogram in metric %s %s", name, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
)
written += n
if err != nil {
return
}
if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
)
written += n
if err != nil {
return
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
written += n
if err != nil {
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
default:
return written, fmt.Errorf(
"unexpected type in metric %s %s", name, metric,
)
}
written += n
if err != nil {
return
}
}
return
}
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
return w.Write([]byte("# EOF\n"))
}
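A brief sketch of the intended call pattern, with the writer and input assumed to come from elsewhere:

package openmetricsdump

import (
	"io"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func write(out io.Writer, fams []*dto.MetricFamily) error {
	for _, mf := range fams {
		if _, err := expfmt.MetricFamilyToOpenMetrics(out, mf); err != nil {
			return err
		}
	}
	// Per the doc comment above, the final `# EOF` line is the caller's job.
	_, err := expfmt.FinalizeOpenMetrics(out)
	return err
}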
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
// w, given the metric name, the metric proto message itself, optionally an
// additional label name with a float64 value (use empty string as label name if
// not required), the value (optionally as float64 or uint64, determined by
// useIntValue), and optionally an exemplar (use nil if not required). The
// function returns the number of bytes written and any error encountered.
func writeOpenMetricsSample(
w enhancedWriter,
name, suffix string,
metric *dto.Metric,
additionalLabelName string, additionalLabelValue float64,
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeOpenMetricsLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
if useIntValue {
n, err = writeUint(w, intValue)
} else {
n, err = writeOpenMetricsFloat(w, floatValue)
}
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly without converting to a float first.
n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
written += n
if err != nil {
return written, err
}
}
if exemplar != nil {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
return written, err
}
}
err = w.WriteByte('\n')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeOpenMetricsLabelPairs works like writeLabelPairs but formats the float
// in OpenMetrics style.
func writeOpenMetricsLabelPairs(
w enhancedWriter,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
)
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
written += n
if err != nil {
return written, err
}
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeEscapedString(w, lp.GetValue(), true)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
separator = ','
}
if additionalLabelName != "" {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(additionalLabelName)
written += n
if err != nil {
return written, err
}
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeOpenMetricsFloat(w, additionalLabelValue)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
}
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
written := 0
n, err := w.WriteString(" # ")
written += n
if err != nil {
return written, err
}
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
n, err = writeOpenMetricsFloat(w, e.GetValue())
written += n
if err != nil {
return written, err
}
if e.Timestamp != nil {
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
ts, err := ptypes.Timestamp((*e).Timestamp)
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
written += n
if err != nil {
return written, err
}
}
return written, nil
}
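For reference, a histogram bucket sample carrying an exemplar renders on a single line like the following (illustrative values; the exemplar is the part after `#`, with its optional timestamp last):

```
request_duration_seconds_bucket{le="0.25"} 3 # {trace_id="abc123"} 0.21 1622821573.456
```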
// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
// number would otherwise contain neither a "." nor an "e".
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
switch {
case f == 1:
return w.WriteString("1.0")
case f == 0:
return w.WriteString("0.0")
case f == -1:
return w.WriteString("-1.0")
case math.IsNaN(f):
return w.WriteString("NaN")
case math.IsInf(f, +1):
return w.WriteString("+Inf")
case math.IsInf(f, -1):
return w.WriteString("-Inf")
default:
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
if !bytes.ContainsAny(*bp, "e.") {
*bp = append(*bp, '.', '0')
}
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}
}
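The effect of the trailing-".0" rule, with outputs as produced by strconv's 'g' formatting (a sketch, not exhaustive):

```go
writeOpenMetricsFloat(w, 42)   // writes "42.0"  (".0" appended)
writeOpenMetricsFloat(w, 42.5) // writes "42.5"  (already contains ".")
writeOpenMetricsFloat(w, 1e9)  // writes "1e+09" (contains "e", left alone)
writeOpenMetricsFloat(w, 1)    // writes "1.0"   (fast path)
```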
// writeUint is like writeInt, but for uint64.
func writeUint(w enhancedWriter, u uint64) (int, error) {
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendUint((*bp)[:0], u, 10)
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}

View file

@ -14,9 +14,10 @@
package expfmt
import (
"bytes"
"bufio"
"fmt"
"io"
"io/ioutil"
"math"
"strconv"
"strings"
@ -27,7 +28,7 @@ import (
dto "github.com/prometheus/client_model/go"
)
// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
// implements it.
type enhancedWriter interface {
io.Writer
@ -37,14 +38,13 @@ type enhancedWriter interface {
}
const (
initialBufSize = 512
initialNumBufSize = 24
)
var (
bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, initialBufSize))
return bufio.NewWriter(ioutil.Discard)
},
}
numBufPool = sync.Pool{
@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
}
// Try the interface upgrade. If it doesn't work, we'll use a
// bytes.Buffer from the sync.Pool and write out its content to out in a
// single go in the end.
// bufio.Writer from the sync.Pool.
w, ok := out.(enhancedWriter)
if !ok {
b := bufPool.Get().(*bytes.Buffer)
b.Reset()
b := bufPool.Get().(*bufio.Writer)
b.Reset(out)
w = b
defer func() {
bWritten, bErr := out.Write(b.Bytes())
written = bWritten
bErr := b.Flush()
if err == nil {
err = bErr
}
@ -425,9 +423,8 @@ var (
func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
if includeDoubleQuote {
return quotedEscaper.WriteString(w, v)
} else {
return escaper.WriteString(w, v)
}
return escaper.WriteString(w, v)
}
// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes

View file

@ -24,7 +24,7 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/prometheus/common/model"
)
@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil
}
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentMetric.Label {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
return nil
}
}
return p.startLabelValue
}
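With this check in place, an exposition line such as the following (illustrative) is rejected with a "duplicate label names" parse error instead of silently keeping both pairs:

```
requests_total{code="200",code="500"} 17
```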
@ -325,7 +336,7 @@ func (p *TextParser) startLabelValue() stateFn {
// - Other labels have to be added to currentLabels for signature calculation.
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
if p.currentLabelPair.GetName() == model.QuantileLabel {
if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
return nil
@ -337,7 +348,7 @@ func (p *TextParser) startLabelValue() stateFn {
// Similar special treatment of histograms.
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
if p.currentLabelPair.GetName() == model.BucketLabel {
if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
return nil
@ -392,7 +403,7 @@ func (p *TextParser) readingValue() stateFn {
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
value, err := strconv.ParseFloat(p.currentToken.String(), 64)
value, err := parseFloat(p.currentToken.String())
if err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
@ -755,3 +766,10 @@ func histogramMetricName(name string) string {
return name
}
}
func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
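The character guard exists because, since Go 1.13, `strconv.ParseFloat` also accepts hexadecimal floats and underscore-separated digits, neither of which is legal in the Prometheus text format. A sketch of the resulting behavior:

```go
parseFloat("1.5")    // 1.5, nil
parseFloat("0x1p-2") // 0, error (strconv alone would return 0.25)
parseFloat("1_000")  // 0, error (strconv alone would return 1000)
```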

View file

@ -20,7 +20,7 @@ const (
prime64 = 1099511628211
)
// hashNew initializies a new fnv64a hash value.
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}

View file

@ -45,6 +45,14 @@ const (
// scrape a target.
MetricsPathLabel = "__metrics_path__"
// ScrapeIntervalLabel is the name of the label that holds the scrape interval
// used to scrape a target.
ScrapeIntervalLabel = "__scrape_interval__"
// ScrapeTimeoutLabel is the name of the label that holds the scrape
// timeout used to scrape a target.
ScrapeTimeoutLabel = "__scrape_timeout__"
// ReservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
ReservedLabelPrefix = "__"

View file

@ -14,6 +14,8 @@
package model
import (
"encoding/json"
"errors"
"fmt"
"math"
"regexp"
@ -181,73 +183,118 @@ func (d *Duration) Type() string {
return "duration"
}
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
switch durationStr {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, fmt.Errorf("empty duration string")
}
matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 {
if matches == nil {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
}
var (
n, _ = strconv.Atoi(matches[1])
dur = time.Duration(n) * time.Millisecond
)
switch unit := matches[2]; unit {
case "y":
dur *= 1000 * 60 * 60 * 24 * 365
case "w":
dur *= 1000 * 60 * 60 * 24 * 7
case "d":
dur *= 1000 * 60 * 60 * 24
case "h":
dur *= 1000 * 60 * 60
case "m":
dur *= 1000 * 60
case "s":
dur *= 1000
case "ms":
// Value already correct
default:
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
var dur time.Duration
// Parse the match at pos `pos` in the regex and use `mult` to turn that
// into ms, then add that value to the total parsed duration.
var overflowErr error
m := func(pos int, mult time.Duration) {
if matches[pos] == "" {
return
}
n, _ := strconv.Atoi(matches[pos])
// Check if the provided duration overflows time.Duration (> ~290 years).
if n > int((1<<63-1)/mult/time.Millisecond) {
overflowErr = errors.New("duration out of range")
}
d := time.Duration(n) * time.Millisecond
dur += d * mult
if dur < 0 {
overflowErr = errors.New("duration out of range")
}
}
return Duration(dur), nil
m(2, 1000*60*60*24*365) // y
m(4, 1000*60*60*24*7) // w
m(6, 1000*60*60*24) // d
m(8, 1000*60*60) // h
m(10, 1000*60) // m
m(12, 1000) // s
m(14, 1) // ms
return Duration(dur), overflowErr
}
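A quick sketch of the new multi-unit parsing and the overflow guard:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("2h45m") // multi-unit strings now parse
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(time.Duration(d)) // 2h45m0s

	_, err = model.ParseDuration("999999999y") // > ~290 years
	fmt.Println(err)                           // duration out of range
}
```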
func (d Duration) String() string {
var (
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
ms = int64(time.Duration(d) / time.Millisecond)
r = ""
)
if ms == 0 {
return "0s"
}
factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,
"d": 1000 * 60 * 60 * 24,
"h": 1000 * 60 * 60,
"m": 1000 * 60,
"s": 1000,
"ms": 1,
f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
}
if v := ms / mult; v > 0 {
r += fmt.Sprintf("%d%s", v, unit)
ms -= v * mult
}
}
switch int64(0) {
case ms % factors["y"]:
unit = "y"
case ms % factors["w"]:
unit = "w"
case ms % factors["d"]:
unit = "d"
case ms % factors["h"]:
unit = "h"
case ms % factors["m"]:
unit = "m"
case ms % factors["s"]:
unit = "s"
// Only format years and weeks if the remainder is zero, as it is often
// easier to read 90d than 12w6d.
f("y", 1000*60*60*24*365, true)
f("w", 1000*60*60*24*7, true)
f("d", 1000*60*60*24, false)
f("h", 1000*60*60, false)
f("m", 1000*60, false)
f("s", 1000, false)
f("ms", 1, false)
return r
}
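The formatting side decomposes a value into multiple units, skipping years and weeks unless they divide evenly, so 90 days prints as "90d" rather than "12w6d". Continuing with the same imports as the parsing sketch above:

```go
fmt.Println(model.Duration(90 * 24 * time.Hour)) // 90d
fmt.Println(model.Duration(14 * 24 * time.Hour)) // 2w
fmt.Println(model.Duration(90 * time.Minute))    // 1h30m
```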
// MarshalJSON implements the json.Marshaler interface.
func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (d *Duration) UnmarshalJSON(bytes []byte) error {
var s string
if err := json.Unmarshal(bytes, &s); err != nil {
return err
}
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
dur, err := ParseDuration(s)
if err != nil {
return err
}
*d = dur
return nil
}
// MarshalText implements the encoding.TextMarshaler interface.
func (d *Duration) MarshalText() ([]byte, error) {
return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (d *Duration) UnmarshalText(text []byte) error {
var err error
*d, err = ParseDuration(string(text))
return err
}
// MarshalYAML implements the yaml.Marshaler interface.

View file

@ -1,6 +1,4 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
---
linters:
enable:
- staticcheck
- govet
disable-all: true
- golint

View file

@ -0,0 +1,3 @@
## Prometheus Community Code of Conduct
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View file

@ -2,17 +2,120 @@
Prometheus uses GitHub to manage reviews of pull requests.
* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
* If you have a trivial fix or improvement, go ahead and create a pull request,
addressing (with `@...`) the maintainer of this repository (see
addressing (with `@...`) a suitable maintainer of this repository (see
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
This will avoid unnecessary work and surely give you and us a good deal
of inspiration.
of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
* Relevant coding style guidelines are the [Go Code Review
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
## Steps to Contribute
Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on it and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
For quickly compiling and testing your changes do:
```
make test # Make sure all the tests pass before you commit and push :)
```
We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
## Pull Request Checklist
* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
* Add tests relevant to the fixed bug or new feature.
## Dependency management
The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
All dependencies are vendored in the `vendor/` directory.
To add or update a new dependency, use the `go get` command:
```bash
# Pick the latest tagged release.
go get example.com/some/module/pkg
# Pick a specific version.
go get example.com/some/module/pkg@vX.Y.Z
```
Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
```bash
# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
```
You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
## API Implementation Guidelines
### Naming and Documentation
Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
### Reading vs. Parsing
Most functionality in this library consists of reading files and then parsing the text into structured data. In most
cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
### /proc and /sys filesystem I/O
The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.
Many of the files change continuously, and the data being read can in some cases change between subsequent
reads of the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
the file.
Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
the full file, and then using a scanner on the `[]byte` or `string` containing the data.
```
data, err := util.ReadFileNoStat("/proc/cpuinfo")
if err != nil {
return err
}
reader := bytes.NewReader(data)
scanner := bufio.NewScanner(reader)
```
The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
not bother to check the size of the file before reading.
```
data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
```

View file

@ -18,6 +18,8 @@ include Makefile.common
./ttar -C $(dir $*) -x -f $*.ttar
touch $@
fixtures: fixtures/.unpacked
update_fixtures:
rm -vf fixtures/.unpacked
./ttar -c -f fixtures.ttar fixtures/

View file

@ -69,12 +69,21 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
PROMU_VERSION ?= 0.4.0
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.12.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
GOLANGCI_LINT_VERSION ?= v1.39.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -86,7 +95,8 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./
DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
@ -108,7 +118,7 @@ endif
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license lint unused build test
common-all: precheck style check_license lint yamllint unused build test
.PHONY: common-style
common-style:
@ -140,15 +150,29 @@ else
$(GO) get $(GOOPTS) -t ./...
endif
.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
$(GO) get $$m; \
done
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifneq (,$(wildcard vendor))
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
endif
.PHONY: common-test-short
common-test-short:
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
.PHONY: common-test
common-test:
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
$(GOTEST_DIR):
@mkdir -p $@
.PHONY: common-format
common-format:
@ -174,6 +198,15 @@ else
endif
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint))
@echo "yamllint not installed so skipping"
else
yamllint .
endif
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
@ -200,7 +233,7 @@ endif
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball
common-tarball: promu
@ -211,19 +244,22 @@ common-tarball: promu
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
$(DOCKERFILE_PATH)
$(DOCKERBUILD_CONTEXT)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:

View file

@ -1,19 +1,20 @@
# procfs
This procfs package provides functions to retrieve system, kernel and process
This package provides functions to retrieve system, kernel, and process
metrics from the pseudo-filesystems /proc and /sys.
*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
## Usage
The procfs library is organized by packages based on whether the gathered data is coming from
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
/sys, or both. For example, cpu statistics are gathered from
`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
point is initialized, and then the stat information is read.
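A minimal sketch of that flow, assuming a standard /proc mount (`log` and `fmt` imports omitted):

```go
fs, err := procfs.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
stats, err := fs.Stat()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("boot time: %d\n", stats.BootTime)
```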
@ -29,10 +30,17 @@ Some sub-packages such as `blockdevice`, require access to both the proc and sys
stats, err := fs.ProcDiskstats()
```
## Package Organization
The packages in this project are organized according to (1) whether the data comes from the `/proc` or
`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
is available in the `blockdevice` sub-package.
## Building and Testing
The procfs library is normally built as part of another application. However, when making
changes to the library, the `make test` command can be used to run the API test suite.
The procfs library is intended to be built as part of another application, so there are no distributable binaries.
However, most of the API includes unit tests which can be run with `make test`.
### Updating Test Fixtures

6 vendor/github.com/prometheus/procfs/SECURITY.md generated vendored Normal file
View file

@ -0,0 +1,6 @@
# Reporting a security issue
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
https://prometheus.io/docs/operating/security/

85 vendor/github.com/prometheus/procfs/arp.go generated vendored Normal file
View file

@ -0,0 +1,85 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"io/ioutil"
"net"
"strings"
)
// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
// IP address
IPAddr net.IP
// MAC address
HWAddr net.HardwareAddr
// Name of the device
Device string
}
// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
// and then returns a slice of ARPEntry values.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
}
func parseARPEntries(data []byte) ([]ARPEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]ARPEntry, 0)
var err error
const (
expectedDataWidth = 6
expectedHeaderWidth = 9
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == expectedHeaderWidth || width == 0 {
continue
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
}
}
return entries, err
}
func parseARPEntry(columns []string) (ARPEntry, error) {
ip := net.ParseIP(columns[0])
mac := net.HardwareAddr(columns[3])
entry := ARPEntry{
IPAddr: ip,
HWAddr: mac,
Device: columns[5],
}
return entry, nil
}
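A usage sketch for the new API, assuming a standard /proc mount (`log` and `fmt` imports omitted):

```go
fs, err := procfs.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
entries, err := fs.GatherARPEntries()
if err != nil {
	log.Fatal(err)
}
for _, e := range entries {
	fmt.Printf("%s is at %s via %s\n", e.IPAddr, e.HWAddr, e.Device)
}
```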

View file

@ -31,7 +31,7 @@ type BuddyInfo struct {
Sizes []float64
}
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil {
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
}
}

View file

@ -1,4 +1,4 @@
// Copyright 2019 The Prometheus Authors
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -11,19 +11,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.12
package procfs
package prometheus
import (
"strings"
import "runtime/debug"
"github.com/prometheus/procfs/internal/util"
)
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
func readBuildInfo() (path, version, sum string) {
path, version, sum = "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
// CmdLine returns the command line of the kernel.
func (fs FS) CmdLine() ([]string, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
if err != nil {
return nil, err
}
return
return strings.Fields(string(data)), nil
}

481 vendor/github.com/prometheus/procfs/cpuinfo.go generated vendored Normal file
View file

@ -0,0 +1,481 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
import (
"bufio"
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
type CPUInfo struct {
Processor uint
VendorID string
CPUFamily string
Model string
ModelName string
Stepping string
Microcode string
CPUMHz float64
CacheSize string
PhysicalID string
Siblings uint
CoreID string
CPUCores uint
APICID string
InitialAPICID string
FPU string
FPUException string
CPUIDLevel uint
WP string
Flags []string
Bugs []string
BogoMips float64
CLFlushSize uint
CacheAlignment uint
AddressSizes string
PowerManagement string
}
var (
cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
)
// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
if err != nil {
return nil, err
}
return parseCPUInfo(data)
}
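A usage sketch, printing one line per logical CPU using fields from the struct above (`log` and `fmt` imports omitted):

```go
fs, err := procfs.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
infos, err := fs.CPUInfo()
if err != nil {
	log.Fatal(err)
}
for _, ci := range infos {
	fmt.Printf("cpu%d: %s @ %.0f MHz\n", ci.Processor, ci.ModelName, ci.CPUMHz)
}
```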
func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "vendor", "vendor_id":
cpuinfo[i].VendorID = field[1]
case "cpu family":
cpuinfo[i].CPUFamily = field[1]
case "model":
cpuinfo[i].Model = field[1]
case "model name":
cpuinfo[i].ModelName = field[1]
case "stepping":
cpuinfo[i].Stepping = field[1]
case "microcode":
cpuinfo[i].Microcode = field[1]
case "cpu MHz":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "cache size":
cpuinfo[i].CacheSize = field[1]
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "apicid":
cpuinfo[i].APICID = field[1]
case "initial apicid":
cpuinfo[i].InitialAPICID = field[1]
case "fpu":
cpuinfo[i].FPU = field[1]
case "fpu_exception":
cpuinfo[i].FPUException = field[1]
case "cpuid level":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUIDLevel = uint(v)
case "wp":
cpuinfo[i].WP = field[1]
case "flags":
cpuinfo[i].Flags = strings.Fields(field[1])
case "bugs":
cpuinfo[i].Bugs = strings.Fields(field[1])
case "bogomips":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "clflush size":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CLFlushSize = uint(v)
case "cache_alignment":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CacheAlignment = uint(v)
case "address sizes":
cpuinfo[i].AddressSizes = field[1]
case "power management":
cpuinfo[i].PowerManagement = field[1]
}
}
return cpuinfo, nil
}
func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
featuresLine := ""
commonCPUInfo := CPUInfo{}
i := 0
if strings.TrimSpace(field[0]) == "Processor" {
commonCPUInfo = CPUInfo{ModelName: field[1]}
i = -1
} else {
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo = []CPUInfo{firstcpu}
}
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "BogoMIPS":
if i == -1 {
cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
i++
cpuinfo[i].Processor = 0
}
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "Features":
featuresLine = line
case "model name":
cpuinfo[i].ModelName = field[1]
}
}
fields := strings.SplitN(featuresLine, ": ", 2)
for i := range cpuinfo {
cpuinfo[i].Flags = strings.Fields(fields[1])
}
return cpuinfo, nil
}
func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
commonCPUInfo := CPUInfo{VendorID: field[1]}
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "bogomips per cpu":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
commonCPUInfo.BogoMips = v
case "features":
commonCPUInfo.Flags = strings.Fields(field[1])
}
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
if err != nil {
return nil, err
}
cpu.Processor = uint(v)
cpuinfo = append(cpuinfo, cpu)
}
if strings.HasPrefix(line, "cpu number") {
break
}
}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "cpu number":
i++
case "cpu MHz dynamic":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
}
}
return cpuinfo, nil
}
func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
// find the first "system type" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
systemType := field[1]
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
cpuinfo[i].VendorID = systemType
case "cpu model":
cpuinfo[i].ModelName = field[1]
case "BogoMIPS":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
}
}
return cpuinfo, nil
}
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "cpu":
cpuinfo[i].VendorID = field[1]
case "clock":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
}
}
return cpuinfo, nil
}
func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
case "hart":
cpuinfo[i].CoreID = field[1]
case "isa":
cpuinfo[i].ModelName = field[1]
}
}
return cpuinfo, nil
}
func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
return nil, errors.New("not implemented")
}
// firstNonEmptyLine advances the scanner to the first non-empty line
// and returns the contents of that line
func firstNonEmptyLine(scanner *bufio.Scanner) string {
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) != "" {
return line
}
}
return ""
}

19 vendor/github.com/prometheus/procfs/cpuinfo_armx.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build arm arm64
package procfs
var parseCPUInfo = parseCPUInfoARM

19 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build mips mipsle mips64 mips64le
package procfs
var parseCPUInfo = parseCPUInfoMips

19 vendor/github.com/prometheus/procfs/cpuinfo_others.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
var parseCPUInfo = parseCPUInfoDummy

19 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build ppc64 ppc64le
package procfs
var parseCPUInfo = parseCPUInfoPPC

19 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build riscv riscv64
package procfs
var parseCPUInfo = parseCPUInfoRISCV

18 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go generated vendored Normal file
View file

@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoS390X

19 vendor/github.com/prometheus/procfs/cpuinfo_x86.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build 386 amd64
package procfs
var parseCPUInfo = parseCPUInfoX86

153 vendor/github.com/prometheus/procfs/crypto.go generated vendored Normal file
View file

@ -0,0 +1,153 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Crypto holds info parsed from /proc/crypto.
type Crypto struct {
Alignmask *uint64
Async bool
Blocksize *uint64
Chunksize *uint64
Ctxsize *uint64
Digestsize *uint64
Driver string
Geniv string
Internal string
Ivsize *uint64
Maxauthsize *uint64
MaxKeysize *uint64
MinKeysize *uint64
Module string
Name string
Priority *int64
Refcnt *int64
Seedsize *uint64
Selftest string
Type string
Walksize *uint64
}
// Crypto parses the crypto file (/proc/crypto) and returns a slice of
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
}
return crypto, nil
}
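A usage sketch, assuming `fs` was created with `procfs.NewFS("/proc")` and that `log` and `fmt` are imported:

```go
entries, err := fs.Crypto()
if err != nil {
	log.Fatal(err)
}
for _, c := range entries {
	if c.Priority != nil {
		fmt.Printf("%s (driver %s, priority %d)\n", c.Name, c.Driver, *c.Priority)
	}
}
```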
// parseCrypto parses a /proc/crypto stream into Crypto elements.
func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto
s := bufio.NewScanner(r)
for s.Scan() {
text := s.Text()
switch {
case strings.HasPrefix(text, "name"):
// Each crypto element begins with its name.
out = append(out, Crypto{})
case text == "":
continue
}
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
}
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
// Parse the key/value pair into the currently focused element.
c := &out[len(out)-1]
if err := c.parseKV(k, v); err != nil {
return nil, err
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)
switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}
return vp.Err()
}

View file

@ -31,7 +31,7 @@
// log.Fatalf("could not get process: %s", err)
// }
//
// stat, err := p.NewStat()
// stat, err := p.Stat()
// if err != nil {
// log.Fatalf("could not get process stat: %s", err)
// }

File diff suppressed because it is too large

422 vendor/github.com/prometheus/procfs/fscache.go generated vendored Normal file
View file

@ -0,0 +1,422 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Fscacheinfo represents fscache statistics.
type Fscacheinfo struct {
// Number of index cookies allocated
IndexCookiesAllocated uint64
// Number of data storage cookies allocated
DataStorageCookiesAllocated uint64
// Number of special cookies allocated
SpecialCookiesAllocated uint64
// Number of objects allocated
ObjectsAllocated uint64
// Number of object allocation failures
ObjectAllocationsFailure uint64
// Number of objects that reached the available state
ObjectsAvailable uint64
// Number of objects that reached the dead state
ObjectsDead uint64
// Number of objects that didn't have a coherency check
ObjectsWithoutCoherencyCheck uint64
// Number of objects that passed a coherency check
ObjectsWithCoherencyCheck uint64
// Number of objects that needed a coherency data update
ObjectsNeedCoherencyCheckUpdate uint64
// Number of objects that were declared obsolete
ObjectsDeclaredObsolete uint64
// Number of pages marked as being cached
PagesMarkedAsBeingCached uint64
// Number of uncache page requests seen
UncachePagesRequestSeen uint64
// Number of acquire cookie requests seen
AcquireCookiesRequestSeen uint64
// Number of acq reqs given a NULL parent
AcquireRequestsWithNullParent uint64
// Number of acq reqs rejected due to no cache available
AcquireRequestsRejectedNoCacheAvailable uint64
// Number of acq reqs succeeded
AcquireRequestsSucceeded uint64
// Number of acq reqs rejected due to error
AcquireRequestsRejectedDueToError uint64
// Number of acq reqs failed on ENOMEM
AcquireRequestsFailedDueToEnomem uint64
// Number of lookup calls made on cache backends
LookupsNumber uint64
// Number of negative lookups made
LookupsNegative uint64
// Number of positive lookups made
LookupsPositive uint64
// Number of objects created by lookup
ObjectsCreatedByLookup uint64
// Number of lookups timed out and requeued
LookupsTimedOutAndRequed uint64
InvalidationsNumber uint64
InvalidationsRunning uint64
// Number of update cookie requests seen
UpdateCookieRequestSeen uint64
// Number of upd reqs given a NULL parent
UpdateRequestsWithNullParent uint64
// Number of upd reqs granted CPU time
UpdateRequestsRunning uint64
// Number of relinquish cookie requests seen
RelinquishCookiesRequestSeen uint64
// Number of rlq reqs given a NULL parent
RelinquishCookiesWithNullParent uint64
// Number of rlq reqs waited on completion of creation
RelinquishRequestsWaitingCompleteCreation uint64
// Number of relinquish requests retried
RelinquishRetries uint64
// Number of attribute changed requests seen
AttributeChangedRequestsSeen uint64
// Number of attr changed requests queued
AttributeChangedRequestsQueued uint64
// Number of attr changed rejected -ENOBUFS
AttributeChangedRejectDueToEnobufs uint64
// Number of attr changed failed -ENOMEM
AttributeChangedFailedDueToEnomem uint64
// Number of attr changed ops given CPU time
AttributeChangedOps uint64
// Number of allocation requests seen
AllocationRequestsSeen uint64
// Number of successful alloc reqs
AllocationOkRequests uint64
// Number of alloc reqs that waited on lookup completion
AllocationWaitingOnLookup uint64
// Number of alloc reqs rejected -ENOBUFS
AllocationsRejectedDueToEnobufs uint64
// Number of alloc reqs aborted -ERESTARTSYS
AllocationsAbortedDueToErestartsys uint64
// Number of alloc reqs submitted
AllocationOperationsSubmitted uint64
// Number of alloc reqs waited for CPU time
AllocationsWaitedForCPU uint64
// Number of alloc reqs aborted due to object death
AllocationsAbortedDueToObjectDeath uint64
// Number of retrieval (read) requests seen
RetrievalsReadRequests uint64
// Number of successful retr reqs
RetrievalsOk uint64
// Number of retr reqs that waited on lookup completion
RetrievalsWaitingLookupCompletion uint64
// Number of retr reqs returned -ENODATA
RetrievalsReturnedEnodata uint64
// Number of retr reqs rejected -ENOBUFS
RetrievalsRejectedDueToEnobufs uint64
// Number of retr reqs aborted -ERESTARTSYS
RetrievalsAbortedDueToErestartsys uint64
// Number of retr reqs failed -ENOMEM
RetrievalsFailedDueToEnomem uint64
// Number of retr reqs submitted
RetrievalsRequests uint64
// Number of retr reqs waited for CPU time
RetrievalsWaitingCPU uint64
// Number of retr reqs aborted due to object death
RetrievalsAbortedDueToObjectDeath uint64
// Number of storage (write) requests seen
StoreWriteRequests uint64
// Number of successful store reqs
StoreSuccessfulRequests uint64
// Number of store reqs on a page already pending storage
StoreRequestsOnPendingStorage uint64
// Number of store reqs rejected -ENOBUFS
StoreRequestsRejectedDueToEnobufs uint64
// Number of store reqs failed -ENOMEM
StoreRequestsFailedDueToEnomem uint64
// Number of store reqs submitted
StoreRequestsSubmitted uint64
// Number of store reqs granted CPU time
StoreRequestsRunning uint64
// Number of pages given store req processing time
StorePagesWithRequestsProcessing uint64
// Number of store reqs deleted from tracking tree
StoreRequestsDeleted uint64
// Number of store reqs over store limit
StoreRequestsOverStoreLimit uint64
// Number of release reqs against pages with no pending store
ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
// Number of release reqs against pages stored by time lock granted
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
// Number of page stores cancelled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
OpsPending uint64
// Number of times async ops given CPU time
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
// Number of async ops cancelled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
// Number of async ops initialised
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
// Number of async ops released (should equal ini=N when idle)
OpsReleased uint64
// Number of deferred-release async ops garbage collected
OpsGarbageCollected uint64
// Number of in-progress alloc_object() cache ops
CacheopAllocationsinProgress uint64
// Number of in-progress lookup_object() cache ops
CacheopLookupObjectInProgress uint64
// Number of in-progress lookup_complete() cache ops
CacheopLookupCompleteInPorgress uint64
// Number of in-progress grab_object() cache ops
CacheopGrabObjectInProgress uint64
// Number of in-progress invalidate() cache ops
CacheopInvalidations uint64
// Number of in-progress update_object() cache ops
CacheopUpdateObjectInProgress uint64
// Number of in-progress drop_object() cache ops
CacheopDropObjectInProgress uint64
// Number of in-progress put_object() cache ops
CacheopPutObjectInProgress uint64
// Number of in-progress attr_changed() cache ops
CacheopAttributeChangeInProgress uint64
// Number of in-progress sync_cache() cache ops
CacheopSyncCacheInProgress uint64
// Number of in-progress read_or_alloc_page() cache ops
CacheopReadOrAllocPageInProgress uint64
// Number of in-progress read_or_alloc_pages() cache ops
CacheopReadOrAllocPagesInProgress uint64
// Number of in-progress allocate_page() cache ops
CacheopAllocatePageInProgress uint64
// Number of in-progress allocate_pages() cache ops
CacheopAllocatePagesInProgress uint64
// Number of in-progress write_page() cache ops
CacheopWritePagesInProgress uint64
// Number of in-progress uncache_page() cache ops
CacheopUncachePagesInProgress uint64
// Number of in-progress dissociate_pages() cache ops
CacheopDissociatePagesInProgress uint64
// Number of object lookups/creations rejected due to lack of space
CacheevLookupsAndCreationsRejectedLackSpace uint64
// Number of stale objects deleted
CacheevStaleObjectsDeleted uint64
// Number of objects retired when relinquished
CacheevRetiredWhenReliquished uint64
// Number of objects culled
CacheevObjectsCulled uint64
}
// Fscacheinfo returns information about current fscache statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
if err != nil {
return Fscacheinfo{}, err
}
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil
}
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
}
for i := range setFields {
*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
if err != nil {
return err
}
}
return nil
}
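As an aside, a minimal in-package sketch (hypothetical, not part of the vendored file) of the setFSCacheFields contract: each field is a name=value token, and the value after "=" is parsed with base 0 into the corresponding destination pointer, matched positionally.

package procfs

import "fmt"

// exampleSetFSCacheFields is a hypothetical helper illustrating how a
// "Lookups: n=9 neg=2 ..." stats line is consumed once the "Lookups:"
// prefix has been stripped off.
func exampleSetFSCacheFields() {
	var n, neg uint64
	if err := setFSCacheFields([]string{"n=9", "neg=2"}, &n, &neg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(n, neg) // 9 2
}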
func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
var m Fscacheinfo
s := bufio.NewScanner(r)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
}
switch fields[0] {
case "Cookies:":
err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
&m.SpecialCookiesAllocated)
if err != nil {
return &m, err
}
case "Objects:":
err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
&m.ObjectsAvailable, &m.ObjectsDead)
if err != nil {
return &m, err
}
case "ChkAux":
err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
if err != nil {
return &m, err
}
case "Pages":
err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
if err != nil {
return &m, err
}
case "Acquire:":
err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
&m.AcquireRequestsFailedDueToEnomem)
if err != nil {
return &m, err
}
case "Lookups:":
err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
if err != nil {
return &m, err
}
case "Invals":
err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
if err != nil {
return &m, err
}
case "Updates:":
err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
&m.UpdateRequestsRunning)
if err != nil {
return &m, err
}
case "Relinqs:":
err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
if err != nil {
return &m, err
}
case "AttrChg:":
err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
if err != nil {
return &m, err
}
case "Allocs":
if strings.Split(fields[2], "=")[0] == "n" {
err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
&m.AllocationsAbortedDueToObjectDeath)
if err != nil {
return &m, err
}
}
case "Retrvls:":
if strings.Split(fields[1], "=")[0] == "n" {
err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
&m.RetrievalsFailedDueToEnomem)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
if err != nil {
return &m, err
}
}
case "Stores":
if strings.Split(fields[2], "=")[0] == "n" {
err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
if err != nil {
return &m, err
}
}
case "VmScan":
err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
if err != nil {
return &m, err
}
case "Ops":
if strings.Split(fields[2], "=")[0] == "pend" {
err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
if err != nil {
return &m, err
}
}
case "CacheOp:":
if strings.Split(fields[1], "=")[0] == "alo" {
err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
if err != nil {
return &m, err
}
} else if strings.Split(fields[1], "=")[0] == "inv" {
err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
&m.CacheopSyncCacheInProgress)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
if err != nil {
return &m, err
}
}
case "CacheEv:":
err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
if err != nil {
return &m, err
}
}
}
return &m, nil
}
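A minimal usage sketch of the exported API above, assuming a standard /proc mount; error handling is abbreviated, and fscache statistics may simply be absent on a given kernel.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err) // e.g. fscache stats not enabled in this kernel
	}
	fmt.Printf("index cookies allocated: %d, objects allocated: %d\n",
		info.IndexCookiesAllocated, info.ObjectsAllocated)
}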

View file

@ -26,7 +26,7 @@ const (
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
// DefaultConfigfsMountPoint is the commont mount point of the configfs
// DefaultConfigfsMountPoint is the common mount point of the configfs
DefaultConfigfsMountPoint = "/sys/kernel/config"
)
@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
}
return FS(mountPoint), nil

View file

@ -0,0 +1,97 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io/ioutil"
"strconv"
"strings"
)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
func ParsePInt64s(ss []string) ([]*int64, error) {
us := make([]*int64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, &u)
}
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
// ReadIntFromFile reads a file and attempts to parse an int64 from it.
func ReadIntFromFile(path string) (int64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}
// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
var truth bool
switch b {
case "enabled":
truth = true
case "disabled":
truth = false
default:
return nil
}
return &truth
}
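Because internal/util cannot be imported from outside the module, here is a hypothetical in-package sketch of the parser contracts; note that ParseBool returns nil (rather than an error) for unrecognized input.

package util

import "fmt"

// exampleParsers is a hypothetical in-package illustration.
func exampleParsers() {
	if v := ParseBool("enabled"); v != nil {
		fmt.Println(*v) // true
	}
	fmt.Println(ParseBool("bogus") == nil) // true: unrecognized input yields nil

	if us, err := ParseUint64s([]string{"1", "2", "3"}); err == nil {
		fmt.Println(us) // [1 2 3]
	}
}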

View file

@ -0,0 +1,38 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io"
"io/ioutil"
"os"
)
// ReadFileNoStat uses ioutil.ReadAll to read the contents of an entire file.
// This is similar to ioutil.ReadFile but without the call to os.Stat, because
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
// Reads a max file size of 512kB. For files larger than this, a scanner
// should be used.
func ReadFileNoStat(filename string) ([]byte, error) {
const maxBufferSize = 1024 * 512
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
reader := io.LimitReader(f, maxBufferSize)
return ioutil.ReadAll(reader)
}
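An in-package sketch (hypothetical) of why ReadFileNoStat exists: /proc files typically stat as size 0, so Stat-based sizing is useless, while a plain size-limited read works.

package util

import "fmt"

// exampleReadFileNoStat is a hypothetical in-package illustration.
func exampleReadFileNoStat() {
	b, err := ReadFileNoStat("/proc/loadavg") // stats as size 0, reads fine
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s", b)
}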

View file

@ -0,0 +1,48 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,!appengine
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
const sysFileBufferSize = 128
b := make([]byte, sysFileBufferSize)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}
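A hypothetical in-package sketch; the hwmon path is illustrative. The single 128-byte syscall.Read means a broken driver returning EAGAIN fails immediately instead of being polled forever.

package util

import "fmt"

// exampleSysReadFile is a hypothetical in-package illustration.
func exampleSysReadFile() {
	s, err := SysReadFile("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		fmt.Println(err) // e.g. EAGAIN from a broken driver, reported at once
		return
	}
	fmt.Println(s)
}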

View file

@ -11,12 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.12
// +build linux,appengine !linux
package prometheus
package util
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
func readBuildInfo() (path, version, sum string) {
return "unknown", "unknown", "unknown"
import (
"fmt"
)
// SysReadFile is here implemented as a noop for builds that do not support
// the read syscall. For example Windows, or Linux on Google App Engine.
func SysReadFile(file string) (string, error) {
return "", fmt.Errorf("not supported on this platform")
}

View file

@ -0,0 +1,91 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"strconv"
)
// TODO(mdlayher): util packages are an anti-pattern and this should be moved
// somewhere else that is more focused in the future.
// A ValueParser enables parsing a single string into a variety of data types
// in a concise and safe way. The Err method must be invoked after invoking
// any other methods to ensure a value was successfully parsed.
type ValueParser struct {
v string
err error
}
// NewValueParser creates a ValueParser using the input string.
func NewValueParser(v string) *ValueParser {
return &ValueParser{v: v}
}
// Int interprets the underlying value as an int and returns that value.
func (vp *ValueParser) Int() int { return int(vp.int64()) }
// PInt64 interprets the underlying value as an int64 and returns a pointer to
// that value.
func (vp *ValueParser) PInt64() *int64 {
if vp.err != nil {
return nil
}
v := vp.int64()
return &v
}
// int64 interprets the underlying value as an int64 and returns that value.
// TODO: export if/when necessary.
func (vp *ValueParser) int64() int64 {
if vp.err != nil {
return 0
}
// A base value of zero makes ParseInt infer the correct base using the
// string's prefix, if any.
const base = 0
v, err := strconv.ParseInt(vp.v, base, 64)
if err != nil {
vp.err = err
return 0
}
return v
}
// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
// that value.
func (vp *ValueParser) PUInt64() *uint64 {
if vp.err != nil {
return nil
}
// A base value of zero makes ParseInt infer the correct base using the
// string's prefix, if any.
const base = 0
v, err := strconv.ParseUint(vp.v, base, 64)
if err != nil {
vp.err = err
return nil
}
return &v
}
// Err returns the last error, if any, encountered by the ValueParser.
func (vp *ValueParser) Err() error {
return vp.err
}
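A hypothetical in-package sketch of the usage pattern the ValueParser doc comment prescribes: call the typed accessors first, then check Err once.

package util

import "fmt"

// exampleValueParser is a hypothetical in-package illustration.
func exampleValueParser() {
	vp := NewValueParser("0x20")
	n := vp.Int()     // base 0, so the 0x prefix selects hexadecimal
	p := vp.PUInt64() // further accessors reuse the same underlying string
	if err := vp.Err(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(n, *p) // 32 32
}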

View file

@ -15,6 +15,7 @@ package procfs
import (
"bufio"
"bytes"
"encoding/hex"
"errors"
"fmt"
@ -24,6 +25,8 @@ import (
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
@ -64,17 +67,16 @@ type IPVSBackendStatus struct {
// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) IPVSStats() (IPVSStats, error) {
file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
defer file.Close()
return parseIPVSStats(file)
return parseIPVSStats(bytes.NewReader(data))
}
// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
func parseIPVSStats(file io.Reader) (IPVSStats, error) {
func parseIPVSStats(r io.Reader) (IPVSStats, error) {
var (
statContent []byte
statLines []string
@ -82,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
stats IPVSStats
)
statContent, err := ioutil.ReadAll(file)
statContent, err := ioutil.ReadAll(r)
if err != nil {
return IPVSStats{}, err
}

62
vendor/github.com/prometheus/procfs/kernel_random.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"os"
"github.com/prometheus/procfs/internal/util"
)
// KernelRandom contains information about the kernel's random number generator.
type KernelRandom struct {
// EntropyAvaliable gives the available entropy, in bits.
EntropyAvaliable *uint64
// PoolSize gives the size of the entropy pool, in bits.
PoolSize *uint64
// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
URandomMinReseedSeconds *uint64
// WriteWakeupThreshold the number of bits of entropy below which we wake up processes
// that do a select(2) or poll(2) for write access to /dev/random.
WriteWakeupThreshold *uint64
// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
// waiting for entropy from /dev/random.
ReadWakeupThreshold *uint64
}
// KernelRandom returns values from /proc/sys/kernel/random.
func (fs FS) KernelRandom() (KernelRandom, error) {
random := KernelRandom{}
for file, p := range map[string]**uint64{
"entropy_avail": &random.EntropyAvaliable,
"poolsize": &random.PoolSize,
"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
"write_wakeup_threshold": &random.WriteWakeupThreshold,
"read_wakeup_threshold": &random.ReadWakeupThreshold,
} {
val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
if os.IsNotExist(err) {
continue
}
if err != nil {
return random, err
}
*p = &val
}
return random, nil
}
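A minimal usage sketch under the usual /proc mount. The pointer fields matter here: files skipped via os.IsNotExist leave the corresponding pointer nil, which is distinguishable from a legitimate zero value.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	kr, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}
	if kr.EntropyAvaliable != nil { // nil means the file was absent
		fmt.Println("entropy available:", *kr.EntropyAvaliable)
	}
}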

62
vendor/github.com/prometheus/procfs/loadavg.go generated vendored Normal file
View file

@ -0,0 +1,62 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// LoadAvg represents an entry in /proc/loadavg
type LoadAvg struct {
Load1 float64
Load5 float64
Load15 float64
}
// LoadAvg returns loadavg from /proc.
func (fs FS) LoadAvg() (*LoadAvg, error) {
path := fs.proc.Path("loadavg")
data, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
return parseLoad(data)
}
// Parse /proc loadavg and return 1m, 5m and 15m.
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load %q: %w", load, err)
}
}
return &LoadAvg{
Load1: loads[0],
Load5: loads[1],
Load15: loads[2],
}, nil
}
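A usage sketch, assuming /proc is mounted at the default location:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	la, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load: %.2f %.2f %.2f\n", la.Load1, la.Load5, la.Load15)
}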

View file

@ -22,8 +22,12 @@ import (
)
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)
// MDStat holds info parsed from /proc/mdstat.
@ -38,12 +42,22 @@ type MDStat struct {
DisksTotal int64
// Number of failed disks.
DisksFailed int64
// Number of "down" disks. (the _ indicator in the status line)
DisksDown int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
BlocksSyncedFinishTime float64
// current sync speed (in Kilobytes/sec)
BlocksSyncedSpeed float64
// Name of md component devices
Devices []string
}
// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
@ -52,11 +66,11 @@ type MDStat struct {
func (fs FS) MDStat() ([]MDStat, error) {
data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
return nil, err
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@ -82,19 +96,16 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
return nil, fmt.Errorf(
"error parsing %s: too few lines for md device",
mdName,
)
return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %s", err)
return nil, fmt.Errorf("error parsing md device lines: %w", err)
}
syncLineIdx := i + 2
@ -105,13 +116,19 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
// Append recovery and resyncing state info.
if recovering || resyncing {
if recovering || resyncing || checking {
if recovering {
state = "recovering"
} else if checking {
state = "checking"
} else {
state = "resyncing"
}
@ -121,74 +138,125 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
}
}
mdStats = append(mdStats, MDStat{
Name: mdName,
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
Name: mdName,
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksDown: down,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
Devices: evalComponentDevices(deviceFields),
})
}
return mdStats, nil
}
func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device deviceLine, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
return total, total, size, nil
return total, total, 0, size, nil
}
if strings.Contains(deviceLine, "inactive") {
return 0, 0, size, nil
return 0, 0, 0, size, nil
}
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
down = int64(strings.Count(matches[4], "_"))
return active, total, size, nil
return active, total, down, size, nil
}
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, nil
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
mdComponentDevices := make([]string, 0)
if len(deviceFields) > 3 {
for _, field := range deviceFields[4:] {
match := componentDeviceRE.FindStringSubmatch(field)
if match == nil {
continue
}
mdComponentDevices = append(mdComponentDevices, match[1])
}
}
return mdComponentDevices
}
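A usage sketch exercising the fields added in this change (DisksDown and the BlocksSynced* progress values); /proc/mdstat only exists on hosts with md devices.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range stats {
		fmt.Printf("%s: %s, %d/%d disks active, %d down\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal, md.DisksDown)
		if md.ActivityState == "recovering" || md.ActivityState == "resyncing" {
			fmt.Printf("  %.1f%% synced, ~%.1f min left at %.0f KiB/s\n",
				md.BlocksSyncedPct, md.BlocksSyncedFinishTime, md.BlocksSyncedSpeed)
		}
	}
}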

277
vendor/github.com/prometheus/procfs/meminfo.go generated vendored Normal file
View file

@ -0,0 +1,277 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Meminfo represents memory statistics.
type Meminfo struct {
// Total usable ram (i.e. physical ram minus a few reserved
// bits and the kernel binary code)
MemTotal *uint64
// The sum of LowFree+HighFree
MemFree *uint64
// An estimate of how much memory is available for starting
// new applications, without swapping. Calculated from
// MemFree, SReclaimable, the size of the file LRU lists, and
// the low watermarks in each zone. The estimate takes into
// account that the system needs some page cache to function
// well, and that not all reclaimable slab will be
// reclaimable, due to items being in use. The impact of those
// factors will vary from system to system.
MemAvailable *uint64
// Relatively temporary storage for raw disk blocks shouldn't
// get tremendously large (20MB or so)
Buffers *uint64
Cached *uint64
// Memory that once was swapped out, is swapped back in but
// still also is in the swapfile (if memory is needed it
// doesn't need to be swapped out AGAIN because it is already
// in the swapfile. This saves I/O)
SwapCached *uint64
// Memory that has been used more recently and usually not
// reclaimed unless absolutely necessary.
Active *uint64
// Memory which has been less recently used. It is more
// eligible to be reclaimed for other purposes
Inactive *uint64
ActiveAnon *uint64
InactiveAnon *uint64
ActiveFile *uint64
InactiveFile *uint64
Unevictable *uint64
Mlocked *uint64
// total amount of swap space available
SwapTotal *uint64
// Memory which has been evicted from RAM, and is temporarily
// on the disk
SwapFree *uint64
// Memory which is waiting to get written back to the disk
Dirty *uint64
// Memory which is actively being written back to the disk
Writeback *uint64
// Non-file backed pages mapped into userspace page tables
AnonPages *uint64
// files which have been mapped, such as libraries
Mapped *uint64
Shmem *uint64
// in-kernel data structures cache
Slab *uint64
// Part of Slab, that might be reclaimed, such as caches
SReclaimable *uint64
// Part of Slab, that cannot be reclaimed on memory pressure
SUnreclaim *uint64
KernelStack *uint64
// amount of memory dedicated to the lowest level of page
// tables.
PageTables *uint64
// NFS pages sent to the server, but not yet committed to
// stable storage
NFSUnstable *uint64
// Memory used for block device "bounce buffers"
Bounce *uint64
// Memory used by FUSE for temporary writeback buffers
WritebackTmp *uint64
// Based on the overcommit ratio ('vm.overcommit_ratio'),
// this is the total amount of memory currently available to
// be allocated on the system. This limit is only adhered to
// if strict overcommit accounting is enabled (mode 2 in
// 'vm.overcommit_memory').
// The CommitLimit is calculated with the following formula:
// CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
// overcommit_ratio / 100 + [total swap pages]
// For example, on a system with 1G of physical RAM and 7G
// of swap with a `vm.overcommit_ratio` of 30 it would
// yield a CommitLimit of 7.3G.
// For more details, see the memory overcommit documentation
// in vm/overcommit-accounting.
CommitLimit *uint64
// The amount of memory presently allocated on the system.
// The committed memory is a sum of all of the memory which
// has been allocated by processes, even if it has not been
// "used" by them as of yet. A process which malloc()'s 1G
// of memory, but only touches 300M of it will show up as
// using 1G. This 1G is memory which has been "committed" to
// by the VM and can be used at any time by the allocating
// application. With strict overcommit enabled on the system
// (mode 2 in 'vm.overcommit_memory'),allocations which would
// exceed the CommitLimit (detailed above) will not be permitted.
// This is useful if one needs to guarantee that processes will
// not fail due to lack of memory once that memory has been
// successfully allocated.
CommittedAS *uint64
// total size of vmalloc memory area
VmallocTotal *uint64
// amount of vmalloc area which is used
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
ShmemPmdMapped *uint64
CmaTotal *uint64
CmaFree *uint64
HugePagesTotal *uint64
HugePagesFree *uint64
HugePagesRsvd *uint64
HugePagesSurp *uint64
Hugepagesize *uint64
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
}
// Meminfo returns information about current kernel/system memory statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Meminfo() (Meminfo, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
if err != nil {
return Meminfo{}, err
}
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
}
return *m, nil
}
func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
// Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
}
v, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
switch fields[0] {
case "MemTotal:":
m.MemTotal = &v
case "MemFree:":
m.MemFree = &v
case "MemAvailable:":
m.MemAvailable = &v
case "Buffers:":
m.Buffers = &v
case "Cached:":
m.Cached = &v
case "SwapCached:":
m.SwapCached = &v
case "Active:":
m.Active = &v
case "Inactive:":
m.Inactive = &v
case "Active(anon):":
m.ActiveAnon = &v
case "Inactive(anon):":
m.InactiveAnon = &v
case "Active(file):":
m.ActiveFile = &v
case "Inactive(file):":
m.InactiveFile = &v
case "Unevictable:":
m.Unevictable = &v
case "Mlocked:":
m.Mlocked = &v
case "SwapTotal:":
m.SwapTotal = &v
case "SwapFree:":
m.SwapFree = &v
case "Dirty:":
m.Dirty = &v
case "Writeback:":
m.Writeback = &v
case "AnonPages:":
m.AnonPages = &v
case "Mapped:":
m.Mapped = &v
case "Shmem:":
m.Shmem = &v
case "Slab:":
m.Slab = &v
case "SReclaimable:":
m.SReclaimable = &v
case "SUnreclaim:":
m.SUnreclaim = &v
case "KernelStack:":
m.KernelStack = &v
case "PageTables:":
m.PageTables = &v
case "NFS_Unstable:":
m.NFSUnstable = &v
case "Bounce:":
m.Bounce = &v
case "WritebackTmp:":
m.WritebackTmp = &v
case "CommitLimit:":
m.CommitLimit = &v
case "Committed_AS:":
m.CommittedAS = &v
case "VmallocTotal:":
m.VmallocTotal = &v
case "VmallocUsed:":
m.VmallocUsed = &v
case "VmallocChunk:":
m.VmallocChunk = &v
case "HardwareCorrupted:":
m.HardwareCorrupted = &v
case "AnonHugePages:":
m.AnonHugePages = &v
case "ShmemHugePages:":
m.ShmemHugePages = &v
case "ShmemPmdMapped:":
m.ShmemPmdMapped = &v
case "CmaTotal:":
m.CmaTotal = &v
case "CmaFree:":
m.CmaFree = &v
case "HugePages_Total:":
m.HugePagesTotal = &v
case "HugePages_Free:":
m.HugePagesFree = &v
case "HugePages_Rsvd:":
m.HugePagesRsvd = &v
case "HugePages_Surp:":
m.HugePagesSurp = &v
case "Hugepagesize:":
m.Hugepagesize = &v
case "DirectMap4k:":
m.DirectMap4k = &v
case "DirectMap2M:":
m.DirectMap2M = &v
case "DirectMap1G:":
m.DirectMap1G = &v
}
}
return &m, nil
}
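A usage sketch; since every field is a pointer (absent keys stay nil), callers must nil-check before dereferencing. Values carry the kernel's units, kB for most fields.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	m, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	if m.MemTotal != nil && m.MemAvailable != nil {
		pct := float64(*m.MemAvailable) / float64(*m.MemTotal) * 100
		fmt.Printf("%.1f%% of memory available\n", pct)
	}
}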

View file

@ -15,19 +15,13 @@ package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
)
var validOptionalFields = map[string]bool{
"shared": true,
"master": true,
"propagate_from": true,
"unbindable": true,
}
"github.com/prometheus/procfs/internal/util"
)
// A MountInfo is a type that describes the details, options
// for each mount, parsed from /proc/self/mountinfo.
@ -35,10 +29,10 @@ var validOptionalFields = map[string]bool{
// is described in the following man page.
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
// Unique Id for the mount
MountId int
// The Id of the parent mount
ParentId int
// Unique ID for the mount
MountID int
// The ID of the parent mount
ParentID int
// The value of `st_dev` for the files on this FS
MajorMinorVer string
// The pathname of the directory in the FS that forms
@ -58,18 +52,10 @@ type MountInfo struct {
SuperOptions map[string]string
}
// Returns part of the mountinfo line, if it exists, else an empty string.
func getStringSliceElement(parts []string, idx int, defaultValue string) string {
if idx >= len(parts) {
return defaultValue
}
return parts[idx]
}
// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
func parseMountInfo(info []byte) ([]*MountInfo, error) {
mounts := []*MountInfo{}
scanner := bufio.NewScanner(r)
scanner := bufio.NewScanner(bytes.NewReader(info))
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseMountInfoString(mountString)
@ -89,58 +75,76 @@ func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
func parseMountInfoString(mountString string) (*MountInfo, error) {
var err error
// OptionalFields can be zero, hence these checks to ensure we do not populate the wrong values in the wrong spots
separatorIndex := strings.Index(mountString, "-")
if separatorIndex == -1 {
return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString)
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 10 {
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
}
beforeFields := strings.Fields(mountString[:separatorIndex])
afterFields := strings.Fields(mountString[separatorIndex+1:])
if (len(beforeFields) + len(afterFields)) < 7 {
return nil, fmt.Errorf("too few fields")
if mountInfo[mountInfoLength-4] != "-" {
return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
}
mount := &MountInfo{
MajorMinorVer: getStringSliceElement(beforeFields, 2, ""),
Root: getStringSliceElement(beforeFields, 3, ""),
MountPoint: getStringSliceElement(beforeFields, 4, ""),
Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")),
MajorMinorVer: mountInfo[2],
Root: mountInfo[3],
MountPoint: mountInfo[4],
Options: mountOptionsParser(mountInfo[5]),
OptionalFields: nil,
FSType: getStringSliceElement(afterFields, 0, ""),
Source: getStringSliceElement(afterFields, 1, ""),
SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")),
FSType: mountInfo[mountInfoLength-3],
Source: mountInfo[mountInfoLength-2],
SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
}
mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, ""))
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
}
mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, ""))
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if len(beforeFields) > 6 {
mount.OptionalFields = make(map[string]string)
optionalFields := beforeFields[6:]
for _, field := range optionalFields {
optionSplit := strings.Split(field, ":")
target, value := optionSplit[0], ""
if len(optionSplit) == 2 {
value = optionSplit[1]
}
// Checks if the 'keys' in the optional fields in the mountinfo line are acceptable.
// Allowed 'keys' are shared, master, propagate_from, unbindable.
if _, ok := validOptionalFields[target]; ok {
mount.OptionalFields[target] = value
}
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
return nil, err
}
}
return mount, nil
}
// Parses the mount options, superblock options.
// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
func mountOptionsIsValidField(s string) bool {
switch s {
case
"shared",
"master",
"propagate_from",
"unbindable":
return true
}
return false
}
// mountOptionsParseOptionalFields parses a list of optional field strings into a map of strings.
func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
optionalFields := make(map[string]string)
for _, field := range o {
optionSplit := strings.SplitN(field, ":", 2)
value := ""
if len(optionSplit) == 2 {
value = optionSplit[1]
}
if mountOptionsIsValidField(optionSplit[0]) {
optionalFields[optionSplit[0]] = value
}
}
return optionalFields, nil
}
// mountOptionsParser parses the mount options, superblock options.
func mountOptionsParser(mountOptions string) map[string]string {
opts := make(map[string]string)
options := strings.Split(mountOptions, ",")
@ -157,22 +161,20 @@ func mountOptionsParser(mountOptions string) map[string]string {
return opts
}
// Retrieves mountinfo information from `/proc/self/mountinfo`.
// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
f, err := os.Open("/proc/self/mountinfo")
data, err := util.ReadFileNoStat("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
return parseMountInfo(data)
}
// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
return parseMountInfo(data)
}
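A usage sketch of the renamed exported helpers; filtering on FSType is a typical way to consume the parsed entries.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		if m.FSType == "nfs4" { // illustrative filter
			fmt.Printf("%d: %s on %s from %s\n",
				m.MountID, m.FSType, m.MountPoint, m.Source)
		}
	}
}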

View file

@ -186,6 +186,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
@ -336,12 +338,12 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
}
@ -354,6 +356,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
}
case fieldAge:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
@ -362,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
@ -369,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
@ -494,8 +505,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
const (
// Number of expected fields in each per-operation statistics set
numFields = 9
// Minimum number of expected fields in each per-operation statistics set
minFields = 9
)
var ops []NFSOperationStats
@ -508,12 +519,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
break
}
if len(ss) != numFields {
if len(ss) < minFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
}
// Skip string operation name for integers
ns := make([]uint64, 0, numFields-1)
ns := make([]uint64, 0, minFields-1)
for _, st := range ss[1:] {
n, err := strconv.ParseUint(st, 10, 64)
if err != nil {
@ -523,7 +534,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
ops = append(ops, NFSOperationStats{
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
@ -533,7 +544,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeQueueMilliseconds: ns[5],
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
})
}
if len(ns) > 8 {
opStats.Errors = ns[8]
}
ops = append(ops, opStats)
}
return ops, s.Err()
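As an aside, a sketch of consuming the new Errors counter through the package's Proc.MountStats API (defined elsewhere in this package, not in this hunk); the *MountStatsNFS type assertion reflects my reading of how Stats is populated and should be treated as an assumption.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue // not an NFS mount
		}
		for _, op := range nfs.Operations {
			if op.Errors > 0 {
				fmt.Printf("%s %s: %d/%d requests errored\n",
					m.Mount, op.Operation, op.Errors, op.Requests)
			}
		}
	}
}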

View file

@ -0,0 +1,153 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
// and contains netfilter conntrack statistics for one CPU core
type ConntrackStatEntry struct {
Entries uint64
Found uint64
Invalid uint64
Ignore uint64
Insert uint64
InsertFailed uint64
Drop uint64
EarlyDrop uint64
SearchRestart uint64
}
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
// Parses a slice of ConntrackStatEntries from the given filepath
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(path)
if err != nil {
// Do not wrap this error so the caller can detect os.IsNotExist and
// similar conditions.
return nil, err
}
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
}
return stat, nil
}
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
var entries []ConntrackStatEntry
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
conntrackEntry, err := parseConntrackStatEntry(fields)
if err != nil {
return nil, err
}
entries = append(entries, *conntrackEntry)
}
return entries, nil
}
// Parses a ConntrackStatEntry from given array of fields
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
if len(fields) != 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
}
entry := &ConntrackStatEntry{}
entries, err := parseConntrackStatField(fields[0])
if err != nil {
return nil, err
}
entry.Entries = entries
found, err := parseConntrackStatField(fields[2])
if err != nil {
return nil, err
}
entry.Found = found
invalid, err := parseConntrackStatField(fields[4])
if err != nil {
return nil, err
}
entry.Invalid = invalid
ignore, err := parseConntrackStatField(fields[5])
if err != nil {
return nil, err
}
entry.Ignore = ignore
insert, err := parseConntrackStatField(fields[8])
if err != nil {
return nil, err
}
entry.Insert = insert
insertFailed, err := parseConntrackStatField(fields[9])
if err != nil {
return nil, err
}
entry.InsertFailed = insertFailed
drop, err := parseConntrackStatField(fields[10])
if err != nil {
return nil, err
}
entry.Drop = drop
earlyDrop, err := parseConntrackStatField(fields[11])
if err != nil {
return nil, err
}
entry.EarlyDrop = earlyDrop
searchRestart, err := parseConntrackStatField(fields[16])
if err != nil {
return nil, err
}
entry.SearchRestart = searchRestart
return entry, nil
}
// Parses a uint64 from given hex in string
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
}
return val, err
}
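A usage sketch; note that the kernel repeats the global Entries value on every per-CPU line, whereas the other counters genuinely differ per CPU.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	for i, e := range entries {
		fmt.Printf("cpu%d: found=%d invalid=%d drop=%d\n",
			i, e.Found, e.Invalid, e.Drop)
	}
}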

View file

@ -183,7 +183,6 @@ func (netDev NetDev) Total() NetDevLine {
names = append(names, ifc.Name)
total.RxBytes += ifc.RxBytes
total.RxPackets += ifc.RxPackets
total.RxPackets += ifc.RxPackets
total.RxErrors += ifc.RxErrors
total.RxDropped += ifc.RxDropped
total.RxFIFO += ifc.RxFIFO

226
vendor/github.com/prometheus/procfs/net_ip_socket.go generated vendored Normal file
View file

@ -0,0 +1,226 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
// this contains generic data structures for both udp and tcp sockets
type (
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
NetIPSocket []*netIPSocketLine
// NetIPSocketSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetIPSocket it does not collect
// the parsed lines into a slice.
NetIPSocketSummary struct {
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
Inode uint64
}
)
// newNetIPSocket creates a new NetIPSocket from the contents of the given file.
func newNetIPSocket(file string) (NetIPSocket, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocket NetIPSocket
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocket = append(netIPSocket, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netIPSocket, nil
}
// newNetIPSocketSummary creates a new NetIPSocketSummary from the contents of the given file.
func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return &netIPSocketSummary, nil
}
// The /proc/net/{t,u}dp{,6} files use network byte order for IPv4. For IPv6 the
// address is four words of four bytes each, and within each of those words the
// four bytes are written in reverse order.
func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
}
switch len(byteIP) {
case 4:
return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
case 16:
i := net.IP{
byteIP[3], byteIP[2], byteIP[1], byteIP[0],
byteIP[7], byteIP[6], byteIP[5], byteIP[4],
byteIP[11], byteIP[10], byteIP[9], byteIP[8],
byteIP[15], byteIP[14], byteIP[13], byteIP[12],
}
return i, nil
default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 10 columns %q",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
}
return line, nil
}
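To make the reversed byte order described above concrete, here is a small standalone sketch (illustrative only, not part of the vendored file) that decodes an IPv4 address the same way parseIP does:
package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"net"
)

func main() {
	// Illustrative sketch. "0100007F" is how 127.0.0.1 appears in the
	// local_address column of /proc/net/udp: the four bytes are reversed.
	b, err := hex.DecodeString("0100007F")
	if err != nil {
		log.Fatal(err)
	}
	ip := net.IP{b[3], b[2], b[1], b[0]}
	fmt.Println(ip) // 127.0.0.1
}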

180
vendor/github.com/prometheus/procfs/net_protocols.go generated vendored Normal file
View file

@ -0,0 +1,180 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// NetProtocolStats stores the contents from /proc/net/protocols
type NetProtocolStats map[string]NetProtocolStatLine
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
// parse the first eight columns into named fields; the remaining columns are not
// likely to change and only serve to provide a set of capabilities for each protocol.
type NetProtocolStatLine struct {
Name string // 0 The name of the protocol
Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
Sockets int64 // 2 Number of sockets in use by this protocol
Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
Pressure int // 4 This is either yes, no, or NI (not implemented), stored as 1, 0, or -1 respectively.
MaxHeader uint64 // 5 Protocol specific max header size
Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
Capabilities NetProtocolCapabilities
}
// NetProtocolCapabilities contains a list of capabilities for each protocol
type NetProtocolCapabilities struct {
Close bool // 8
Connect bool // 9
Disconnect bool // 10
Accept bool // 11
IoCtl bool // 12
Init bool // 13
Destroy bool // 14
Shutdown bool // 15
SetSockOpt bool // 16
GetSockOpt bool // 17
SendMsg bool // 18
RecvMsg bool // 19
SendPage bool // 20
Bind bool // 21
BacklogRcv bool // 22
Hash bool // 23
UnHash bool // 24
GetPort bool // 25
EnterMemoryPressure bool // 26
}
// NetProtocols reads stats from /proc/net/protocols and returns a map of
// NetProtocolStatLine entries. As of this writing no official Linux documentation
// exists, however the source is fairly self-explanatory and the format seems
// stable since its introduction in 2.6.12-rc2:
// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
func (fs FS) NetProtocols() (NetProtocolStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
if err != nil {
return NetProtocolStats{}, err
}
return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
}
func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
nps := NetProtocolStats{}
// Skip the header line
s.Scan()
for s.Scan() {
line, err := nps.parseLine(s.Text())
if err != nil {
return NetProtocolStats{}, err
}
nps[line.Name] = *line
}
return nps, nil
}
func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
var err error
const enabled = "yes"
const disabled = "no"
fields := strings.Fields(rawLine)
line.Name = fields[0]
line.Size, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
if err != nil {
return nil, err
}
line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
if err != nil {
return nil, err
}
if fields[4] == enabled {
line.Pressure = 1
} else if fields[4] == disabled {
line.Pressure = 0
} else {
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
if fields[6] == enabled {
line.Slab = true
} else if fields[6] == disabled {
line.Slab = false
} else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
}
line.ModuleName = fields[7]
err = line.Capabilities.parseCapabilities(fields[8:])
if err != nil {
return nil, err
}
return line, nil
}
func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
// The capabilities are all bools so we can loop over to map them
capabilityFields := [...]*bool{
&pc.Close,
&pc.Connect,
&pc.Disconnect,
&pc.Accept,
&pc.IoCtl,
&pc.Init,
&pc.Destroy,
&pc.Shutdown,
&pc.SetSockOpt,
&pc.GetSockOpt,
&pc.SendMsg,
&pc.RecvMsg,
&pc.SendPage,
&pc.Bind,
&pc.BacklogRcv,
&pc.Hash,
&pc.UnHash,
&pc.GetPort,
&pc.EnterMemoryPressure,
}
for i := 0; i < len(capabilities); i++ {
if capabilities[i] == "y" {
*capabilityFields[i] = true
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
}
}
return nil
}
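A minimal consumer sketch for NetProtocols (not part of the vendored file; it assumes the canonical github.com/prometheus/procfs import path and a mounted /proc):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch; assumes /proc is mounted at the default location.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetProtocols()
	if err != nil {
		log.Fatal(err)
	}
	// Print every protocol that currently has sockets in use.
	for name, line := range stats {
		if line.Sockets > 0 {
			fmt.Printf("%s: %d sockets (module %s)\n", name, line.Sockets, line.ModuleName)
		}
	}
}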

163
vendor/github.com/prometheus/procfs/net_sockstat.go generated vendored Normal file
View file

@ -0,0 +1,163 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
// respectively.
type NetSockstat struct {
// Used is non-nil for IPv4 sockstat results, but nil for IPv6.
Used *int
Protocols []NetSockstatProtocol
}
// A NetSockstatProtocol contains statistics about a given socket protocol.
// Pointer fields indicate that the value may or may not be present on any
// given protocol.
type NetSockstatProtocol struct {
Protocol string
InUse int
Orphan *int
TW *int
Alloc *int
Mem *int
Memory *int
}
// NetSockstat retrieves IPv4 socket statistics.
func (fs FS) NetSockstat() (*NetSockstat, error) {
return readSockstat(fs.proc.Path("net", "sockstat"))
}
// NetSockstat6 retrieves IPv6 socket statistics.
//
// If IPv6 is disabled on this kernel, the returned error can be checked with
// os.IsNotExist.
func (fs FS) NetSockstat6() (*NetSockstat, error) {
return readSockstat(fs.proc.Path("net", "sockstat6"))
}
// readSockstat opens and parses a NetSockstat from the input file.
func readSockstat(name string) (*NetSockstat, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(name)
if err != nil {
// Do not wrap this error so the caller can detect os.IsNotExist and
// similar conditions.
return nil, err
}
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
}
return stat, nil
}
// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
func parseSockstat(r io.Reader) (*NetSockstat, error) {
var stat NetSockstat
s := bufio.NewScanner(r)
for s.Scan() {
// Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ")
if len(fields) < 3 {
return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
}
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
proto := strings.TrimSuffix(fields[0], ":")
switch proto {
case "sockets":
// Special case: IPv4 has a sockets "used" key/value pair that we
// embed at the top level of the structure.
used := kvs["used"]
stat.Used = &used
default:
// Parse all other lines as individual protocols.
nsp := parseSockstatProtocol(kvs)
nsp.Protocol = proto
stat.Protocols = append(stat.Protocols, nsp)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return &stat, nil
}
// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 {
return nil, errors.New("odd number of fields in key/value pairs")
}
// Iterate two values at a time to gather key/value pairs.
out := make(map[string]int, len(kvs)/2)
for i := 0; i < len(kvs); i += 2 {
vp := util.NewValueParser(kvs[i+1])
out[kvs[i]] = vp.Int()
if err := vp.Err(); err != nil {
return nil, err
}
}
return out, nil
}
// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
var nsp NetSockstatProtocol
for k, v := range kvs {
// Capture the range variable to ensure we get unique pointers for
// each of the optional fields.
v := v
switch k {
case "inuse":
nsp.InUse = v
case "orphan":
nsp.Orphan = &v
case "tw":
nsp.TW = &v
case "alloc":
nsp.Alloc = &v
case "mem":
nsp.Mem = &v
case "memory":
nsp.Memory = &v
}
}
return nsp
}
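For context, a short sketch of how NetSockstat might be consumed, under the same assumptions (canonical import path, mounted /proc):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	// Used is only populated for the IPv4 sockstat file.
	if stat.Used != nil {
		fmt.Printf("sockets used: %d\n", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d\n", p.Protocol, p.InUse)
	}
}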

102
vendor/github.com/prometheus/procfs/net_softnet.go generated vendored Normal file
View file

@ -0,0 +1,102 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// For the proc file format details, see:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
type SoftnetStat struct {
// Number of processed packets
Processed uint32
// Number of dropped packets
Dropped uint32
// Number of times processing packets ran out of quota
TimeSqueezed uint32
}
var softNetProcFile = "net/softnet_stat"
// NetSoftnetStat reads data from /proc/net/softnet_stat.
func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
if err != nil {
return nil, err
}
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
}
return entries, nil
}
func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
const minColumns = 9
s := bufio.NewScanner(r)
var stats []SoftnetStat
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
}
// We only parse the first three columns at the moment.
us, err := parseHexUint32s(columns[0:3])
if err != nil {
return nil, err
}
stats = append(stats, SoftnetStat{
Processed: us[0],
Dropped: us[1],
TimeSqueezed: us[2],
})
}
return stats, nil
}
func parseHexUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
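A possible use of NetSoftnetStat (illustrative, same import-path and /proc assumptions); the file has one line per CPU, which maps to one SoftnetStat entry each:
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	// Sum the drop counters across all CPUs.
	var dropped uint32
	for _, s := range stats {
		dropped += s.Dropped
	}
	fmt.Printf("%d CPUs, %d packets dropped in softirq processing\n", len(stats), dropped)
}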

64
vendor/github.com/prometheus/procfs/net_tcp.go generated vendored Normal file
View file

@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
type (
// NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
NetTCP []*netIPSocketLine
// NetTCPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetTCP it does not collect
// the parsed lines into a slice.
NetTCPSummary NetIPSocketSummary
)
// NetTCP returns the IPv4 kernel/networking statistics for TCP sockets
// read from /proc/net/tcp.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP sockets
// read from /proc/net/tcp6.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP sockets read from /proc/net/tcp.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP sockets read from /proc/net/tcp6.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
func newNetTCP(file string) (NetTCP, error) {
n, err := newNetIPSocket(file)
n1 := NetTCP(n)
return n1, err
}
func newNetTCPSummary(file string) (*NetTCPSummary, error) {
n, err := newNetIPSocketSummary(file)
if n == nil {
return nil, err
}
n1 := NetTCPSummary(*n)
return &n1, err
}
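A brief sketch of reading the TCP summary API defined above (assumptions as before: canonical import path, /proc mounted):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	summary, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tcp4: %d sockets, tx_queue=%d, rx_queue=%d\n",
		summary.UsedSockets, summary.TxQueueLength, summary.RxQueueLength)
}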

64
vendor/github.com/prometheus/procfs/net_udp.go generated vendored Normal file
View file

@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
type (
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
NetUDP []*netIPSocketLine
// NetUDPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetUDP it does not collect
// the parsed lines into a slice.
NetUDPSummary NetIPSocketSummary
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp.
func (fs FS) NetUDP() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp"))
}
// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp6.
func (fs FS) NetUDP6() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp6"))
}
// NetUDPSummary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp.
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp"))
}
// NetUDP6Summary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp6.
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp6"))
}
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
n, err := newNetIPSocket(file)
n1 := NetUDP(n)
return n1, err
}
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
n, err := newNetIPSocketSummary(file)
if n == nil {
return nil, err
}
n1 := NetUDPSummary(*n)
return &n1, err
}

View file

@ -15,7 +15,6 @@ package procfs
import (
"bufio"
"errors"
"fmt"
"io"
"os"
@ -27,25 +26,15 @@ import (
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const (
netUnixKernelPtrIdx = iota
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
// Constants for the various /proc/net/unix enumerations.
// TODO: match against x/sys/unix or similar?
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16
netUnixFlagDefault = 0
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
@ -53,129 +42,127 @@ const (
netUnixStateDisconnected = 4
)
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
// NetUNIXType is the type of the type field.
type NetUNIXType uint64
// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUNIXFlags is the type of the flags field.
type NetUNIXFlags uint64
// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUNIXState is the type of the state field.
type NetUNIXState uint64
// NetUnixState is the type of the state field.
type NetUnixState uint64
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
// NetUNIXLine represents a line of /proc/net/unix.
type NetUNIXLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUnixFlags
Type NetUnixType
State NetUnixState
Flags NetUNIXFlags
Type NetUNIXType
State NetUNIXState
Inode uint64
Path string
}
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
Rows []*NetUnixLine
// NetUNIX holds the data read from /proc/net/unix.
type NetUNIX struct {
Rows []*NetUNIXLine
}
// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetUnix()
// NetUNIX returns data read from /proc/net/unix.
func (fs FS) NetUNIX() (*NetUNIX, error) {
return readNetUNIX(fs.proc.Path("net/unix"))
}
// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might return an error with partially parsed data if an error occurs after some data has been parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
// readNetUNIX reads data in /proc/net/unix format from the specified file.
func readNetUNIX(file string) (*NetUNIX, error) {
// This file could be quite large and a streaming read is desirable versus
// reading the entire contents at once.
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
return NewNetUnixByReader(f)
return parseNetUNIX(f)
}
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might return an error with partially parsed data if an error occurs after some data has been parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
nu := &NetUnix{
Rows: make([]*NetUnixLine, 0, 32),
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// From the man page of proc(5), it does not contain an Inode field,
// but it actually exists.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
// parseNetUNIX creates a NetUNIX structure from the incoming stream.
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
// Begin scanning by checking for the existence of Inode.
s := bufio.NewScanner(r)
s.Scan()
minFieldsCnt := netUnixStaticFieldsCnt
// From the man page of proc(5), it does not contain an Inode field,
// but it actually exists. This code works for both cases.
hasInode := strings.Contains(s.Text(), "Inode")
// Expect a minimum number of fields, but Inode and Path are optional:
// Num RefCount Protocol Flags Type St Inode Path
minFields := 6
if hasInode {
minFieldsCnt++
minFields++
}
for scanner.Scan() {
line := scanner.Text()
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
var nu NetUNIX
for s.Scan() {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nu, err
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
}
nu.Rows = append(nu.Rows, item)
}
return nu, scanner.Err()
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
}
return &nu, nil
}
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt {
return nil, fmt.Errorf(
"Parse Unix domain failed: expect at least %d fields but got %d",
minFieldsCnt, fieldsLen)
l := len(fields)
if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
}
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
// Field offsets are as follows:
// Num RefCount Protocol Flags Type St Inode Path
kernelPtr := strings.TrimSuffix(fields[0], ":")
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
}
users, err := u.parseUsers(fields[netUnixRefCountIdx])
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
}
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
}
typ, err := u.parseType(fields[netUnixTypeIdx])
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
}
var inode uint64
if hasInode {
inodeStr := fields[netUnixInodeIdx]
inode, err = u.parseInode(inodeStr)
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
}
}
nuLine := &NetUnixLine{
n := &NetUNIXLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
@ -185,61 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
}
// Path field is optional.
if fieldsLen > minFieldsCnt {
pathIdx := netUnixInodeIdx + 1
if l > min {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
if !hasInode {
pathIdx--
}
nuLine.Path = fields[pathIdx]
n.Path = fields[pathIdx]
}
return nuLine, nil
return n, nil
}
func (u NetUnix) parseKernelPtr(str string) (string, error) {
if !strings.HasSuffix(str, ":") {
return "", errInvalidKernelPtrFmt
}
return str[:len(str)-1], nil
func (u NetUNIX) parseUsers(s string) (uint64, error) {
return strconv.ParseUint(s, 16, 32)
}
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
typ, err := strconv.ParseUint(s, 16, 16)
if err != nil {
return 0, err
}
return NetUnixType(typ), nil
return NetUNIXType(typ), nil
}
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32)
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
flags, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return 0, err
}
return NetUnixFlags(flags), nil
return NetUNIXFlags(flags), nil
}
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8)
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
st, err := strconv.ParseInt(s, 16, 8)
if err != nil {
return 0, err
}
return NetUnixState(st), nil
return NetUNIXState(st), nil
}
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64)
func (u NetUNIX) parseInode(s string) (uint64, error) {
return strconv.ParseUint(s, 10, 64)
}
func (t NetUnixType) String() string {
func (t NetUNIXType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
@ -251,7 +233,7 @@ func (t NetUnixType) String() string {
return "unknown"
}
func (f NetUnixFlags) String() string {
func (f NetUNIXFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
@ -260,7 +242,7 @@ func (f NetUnixFlags) String() string {
}
}
func (s NetUnixState) String() string {
func (s NetUNIXState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"

68
vendor/github.com/prometheus/procfs/netstat.go generated vendored Normal file
View file

@ -0,0 +1,68 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"os"
"path/filepath"
"strconv"
"strings"
)
// NetStat contains statistics for all the counters from one file
type NetStat struct {
Filename string
Stats map[string][]uint64
}
// NetStat retrieves stats from /proc/net/stat/
func (fs FS) NetStat() ([]NetStat, error) {
statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
if err != nil {
return nil, err
}
var netStatsTotal []NetStat
for _, filePath := range statFiles {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
netStatFile := NetStat{
Filename: filepath.Base(filePath),
Stats: make(map[string][]uint64),
}
scanner := bufio.NewScanner(file)
scanner.Scan()
// The first line is always a header for the stats
var headers []string
headers = append(headers, strings.Fields(scanner.Text())...)
// The remaining lines represent per-CPU counters
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
value, err := strconv.ParseUint(counter, 16, 32)
if err != nil {
return nil, err
}
netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
}
}
netStatsTotal = append(netStatsTotal, netStatFile)
}
return netStatsTotal, nil
}
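A small sketch of consuming NetStat (not part of the vendored file; same import-path and /proc assumptions):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	// One NetStat entry per file under /proc/net/stat, e.g. nf_conntrack.
	for _, f := range stats {
		fmt.Printf("%s: %d counters\n", f.Filename, len(f.Stats))
	}
}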

View file

@ -22,6 +22,7 @@ import (
"strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
// Proc provides information about a running process.
@ -104,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
p := Procs{}
@ -121,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) {
// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
f, err := os.Open(p.path("cmdline"))
if err != nil {
return nil, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("cmdline"))
if err != nil {
return nil, err
}
@ -139,9 +134,9 @@ func (p Proc) CmdLine() ([]string, error) {
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
f, err := os.Open(p.path("comm"))
// Wchan returns the wchan (wait channel) of a process.
func (p Proc) Wchan() (string, error) {
f, err := os.Open(p.path("wchan"))
if err != nil {
return "", err
}
@ -152,6 +147,21 @@ func (p Proc) Comm() (string, error) {
return "", err
}
wchan := string(data)
if wchan == "" || wchan == "0" {
return "", nil
}
return wchan, nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
data, err := util.ReadFileNoStat(p.path("comm"))
if err != nil {
return "", err
}
return strings.TrimSpace(string(data)), nil
}
@ -196,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
}
fds[i] = uintptr(fd)
}
@ -252,13 +262,11 @@ func (p Proc) MountStats() ([]*Mount, error) {
// It supplies information missing in `/proc/self/mounts` and
// fixes various other problems with that file too.
func (p Proc) MountInfo() ([]*MountInfo, error) {
f, err := os.Open(p.path("mountinfo"))
data, err := util.ReadFileNoStat(p.path("mountinfo"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
return parseMountInfo(data)
}
func (p Proc) fileDescriptors() ([]string, error) {
@ -270,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
return names, nil
@ -279,3 +287,33 @@ func (p Proc) fileDescriptors() ([]string, error) {
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
// the process.
func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
names, err := p.fileDescriptors()
if err != nil {
return nil, err
}
var fdinfos ProcFDInfos
for _, n := range names {
fdinfo, err := p.FDInfo(n)
if err != nil {
continue
}
fdinfos = append(fdinfos, *fdinfo)
}
return fdinfos, nil
}
// Schedstat returns task scheduling information for the process.
func (p Proc) Schedstat() (ProcSchedstat, error) {
contents, err := ioutil.ReadFile(p.path("schedstat"))
if err != nil {
return ProcSchedstat{}, err
}
return parseProcSchedstat(string(contents))
}
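A minimal sketch combining the Proc accessors above, assuming procfs.Self (defined elsewhere in this package) resolves the calling process:
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	comm, err := p.Comm()
	if err != nil {
		log.Fatal(err)
	}
	// Wchan is empty for a running (not blocked) process.
	wchan, err := p.Wchan()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pid=%d comm=%q wchan=%q\n", p.PID, comm, wchan)
}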

98
vendor/github.com/prometheus/procfs/proc_cgroup.go generated vendored Normal file
View file

@ -0,0 +1,98 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where == what path on the specific cgroupfs). By prefixing this path with the mount point of
// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
// in this hierarchy.
//
// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
type Cgroup struct {
// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
HierarchyID int
// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
Controllers []string
// Path of this control group, relative to the mount point of the cgroupfs representing this specific
// hierarchy
Path string
}
// parseCgroupString parses each line of the /proc/[pid]/cgroup file
// Line format is hierarchyID:[controller1,controller2]:path
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
var err error
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
}
cgroup := &Cgroup{
Path: fields[2],
Controllers: nil,
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID")
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")
cgroup.Controllers = append(cgroup.Controllers, ssNames...)
}
return cgroup, nil
}
// parseCgroups reads each line of the /proc/[pid]/cgroup file
func parseCgroups(data []byte) ([]Cgroup, error) {
var cgroups []Cgroup
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseCgroupString(mountString)
if err != nil {
return nil, err
}
cgroups = append(cgroups, *parsedMounts)
}
err := scanner.Err()
return cgroups, err
}
// Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
// so the len of the returned struct is equal to the number of active hierarchies on this system
func (p Proc) Cgroups() ([]Cgroup, error) {
data, err := util.ReadFileNoStat(p.path("cgroup"))
if err != nil {
return nil, err
}
return parseCgroups(data)
}
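A short consumer sketch for Cgroups, under the usual assumptions (canonical import path, procfs.Self from elsewhere in the package):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	// On a pure cgroup v2 host this prints a single hierarchy with ID 0.
	for _, cg := range cgroups {
		fmt.Printf("hierarchy %d, controllers %v, path %s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}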

View file

@ -14,22 +14,16 @@
package procfs
import (
"io/ioutil"
"os"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Environ reads process environments from /proc/<pid>/environ
func (p Proc) Environ() ([]string, error) {
environments := make([]string, 0)
f, err := os.Open(p.path("environ"))
if err != nil {
return environments, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("environ"))
if err != nil {
return environments, err
}

133
vendor/github.com/prometheus/procfs/proc_fdinfo.go generated vendored Normal file
View file

@ -0,0 +1,133 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"github.com/prometheus/procfs/internal/util"
)
// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
// ProcFDInfo represents file descriptor information.
type ProcFDInfo struct {
// File descriptor
FD string
// File offset
Pos string
// File access mode and status flags
Flags string
// Mount point ID
MntID string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
if err != nil {
return nil, err
}
var text, pos, flags, mntid string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
text = scanner.Text()
if rPos.MatchString(text) {
pos = rPos.FindStringSubmatch(text)[1]
} else if rFlags.MatchString(text) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
return nil, err
}
inotify = append(inotify, *newInotify)
}
}
i := &ProcFDInfo{
FD: fd,
Pos: pos,
Flags: flags,
MntID: mntid,
InotifyInfos: inotify,
}
return i, nil
}
// InotifyInfo represents a single inotify line in the fdinfo file.
type InotifyInfo struct {
// Watch descriptor number
WD string
// Inode number
Ino string
// Device ID
Sdev string
// Mask of events being monitored
Mask string
}
// InotifyInfo constructor. Only available on kernel 3.8+.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
m := rInotifyParts.FindStringSubmatch(line)
if len(m) >= 4 {
var mask string
if len(m) == 5 {
mask = m[4]
}
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: mask,
}
return i, nil
}
return nil, fmt.Errorf("invalid inode entry: %q", line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
type ProcFDInfos []ProcFDInfo
func (p ProcFDInfos) Len() int { return len(p) }
func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
// InotifyWatchLen returns the total number of inotify watches
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
length := 0
for _, f := range p {
length += len(f.InotifyInfos)
}
return length, nil
}
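A small sketch tying FileDescriptorsInfo and InotifyWatchLen together (illustrative, not part of the vendored file; same assumptions as the earlier sketches):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}
	watches, err := infos.InotifyWatchLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d file descriptors, %d inotify watches\n", len(infos), watches)
}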

View file

@ -15,8 +15,8 @@ package procfs
import (
"fmt"
"io/ioutil"
"os"
"github.com/prometheus/procfs/internal/util"
)
// ProcIO models the content of /proc/<pid>/io.
@ -43,13 +43,7 @@ type ProcIO struct {
func (p Proc) IO() (ProcIO, error) {
pio := ProcIO{}
f, err := os.Open(p.path("io"))
if err != nil {
return pio, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("io"))
if err != nil {
return pio, err
}

View file

@ -26,55 +26,55 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
CPUTime int64
CPUTime uint64
// Maximum size of files that the process may create.
FileSize int64
FileSize uint64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
DataSize int64
DataSize uint64
// Maximum size of the process stack in bytes.
StackSize int64
StackSize uint64
// Maximum size of a core file.
CoreFileSize int64
CoreFileSize uint64
// Limit of the process's resident set in pages.
ResidentSet int64
ResidentSet uint64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
Processes int64
Processes uint64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
OpenFiles int64
OpenFiles uint64
// Maximum number of bytes of memory that may be locked into RAM.
LockedMemory int64
LockedMemory uint64
// Maximum size of the process's virtual memory address space in bytes.
AddressSpace int64
AddressSpace uint64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
FileLocks int64
FileLocks uint64
// Limit of signals that may be queued for the real user ID of the calling
// process.
PendingSignals int64
PendingSignals uint64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
MsqqueueSize int64
MsqqueueSize uint64
// Limit of the nice priority set using setpriority(2) or nice(2).
NicePriority int64
NicePriority uint64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
RealtimePriority int64
RealtimePriority uint64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
RealtimeTimeout int64
RealtimeTimeout uint64
}
const (
limitsFields = 3
limitsFields = 4
limitsUnlimited = "unlimited"
)
var (
limitsDelimiter = regexp.MustCompile(" +")
limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
)
// NewLimits returns the current soft limits of the process.
@ -96,46 +96,49 @@ func (p Proc) Limits() (ProcLimits, error) {
l = ProcLimits{}
s = bufio.NewScanner(f)
)
s.Scan() // Skip limits header
for s.Scan() {
fields := limitsDelimiter.Split(s.Text(), limitsFields)
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
return ProcLimits{}, fmt.Errorf(
"couldn't parse %s line %s", f.Name(), s.Text())
return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
}
switch fields[0] {
switch fields[1] {
case "Max cpu time":
l.CPUTime, err = parseInt(fields[1])
l.CPUTime, err = parseUint(fields[2])
case "Max file size":
l.FileSize, err = parseInt(fields[1])
l.FileSize, err = parseUint(fields[2])
case "Max data size":
l.DataSize, err = parseInt(fields[1])
l.DataSize, err = parseUint(fields[2])
case "Max stack size":
l.StackSize, err = parseInt(fields[1])
l.StackSize, err = parseUint(fields[2])
case "Max core file size":
l.CoreFileSize, err = parseInt(fields[1])
l.CoreFileSize, err = parseUint(fields[2])
case "Max resident set":
l.ResidentSet, err = parseInt(fields[1])
l.ResidentSet, err = parseUint(fields[2])
case "Max processes":
l.Processes, err = parseInt(fields[1])
l.Processes, err = parseUint(fields[2])
case "Max open files":
l.OpenFiles, err = parseInt(fields[1])
l.OpenFiles, err = parseUint(fields[2])
case "Max locked memory":
l.LockedMemory, err = parseInt(fields[1])
l.LockedMemory, err = parseUint(fields[2])
case "Max address space":
l.AddressSpace, err = parseInt(fields[1])
l.AddressSpace, err = parseUint(fields[2])
case "Max file locks":
l.FileLocks, err = parseInt(fields[1])
l.FileLocks, err = parseUint(fields[2])
case "Max pending signals":
l.PendingSignals, err = parseInt(fields[1])
l.PendingSignals, err = parseUint(fields[2])
case "Max msgqueue size":
l.MsqqueueSize, err = parseInt(fields[1])
l.MsqqueueSize, err = parseUint(fields[2])
case "Max nice priority":
l.NicePriority, err = parseInt(fields[1])
l.NicePriority, err = parseUint(fields[2])
case "Max realtime priority":
l.RealtimePriority, err = parseInt(fields[1])
l.RealtimePriority, err = parseUint(fields[2])
case "Max realtime timeout":
l.RealtimeTimeout, err = parseInt(fields[1])
l.RealtimeTimeout, err = parseUint(fields[2])
}
if err != nil {
return ProcLimits{}, err
@ -145,13 +148,13 @@ func (p Proc) Limits() (ProcLimits, error) {
return l, s.Err()
}
func parseInt(s string) (int64, error) {
func parseUint(s string) (uint64, error) {
if s == limitsUnlimited {
return -1, nil
return 18446744073709551615, nil // math.MaxUint64 stands in for "unlimited"
}
i, err := strconv.ParseInt(s, 10, 64)
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
}
return i, nil
}
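A brief sketch of reading a process limit via the API above (assumptions as before):
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}
	// Unlimited values are reported as math.MaxUint64.
	fmt.Printf("max open files: %d\n", limits.OpenFiles)
}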

209
vendor/github.com/prometheus/procfs/proc_maps.go generated vendored Normal file
View file

@ -0,0 +1,209 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"golang.org/x/sys/unix"
)
// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
type ProcMapPermissions struct {
// mapping has the [R]ead flag set
Read bool
// mapping has the [W]rite flag set
Write bool
// mapping has the [X]ecutable flag set
Execute bool
// mapping has the [S]hared flag set
Shared bool
// mapping is marked as [P]rivate (copy on write)
Private bool
}
// ProcMap contains the process memory-mappings of the process,
// read from /proc/[pid]/maps
type ProcMap struct {
// The start address of current mapping.
StartAddr uintptr
// The end address of the current mapping
EndAddr uintptr
// The permissions for this mapping
Perms *ProcMapPermissions
// The current offset into the file/fd (e.g., shared libs)
Offset int64
// Device owner of this mapping (major:minor) in Mkdev format.
Dev uint64
// The inode of the device above
Inode uint64
// The file or pseudofile (or empty==anonymous)
Pathname string
}
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
toks := strings.Split(s, ":")
if len(toks) < 2 {
return 0, fmt.Errorf("unexpected number of fields")
}
major, err := strconv.ParseUint(toks[0], 16, 0)
if err != nil {
return 0, err
}
minor, err := strconv.ParseUint(toks[1], 16, 0)
if err != nil {
return 0, err
}
return unix.Mkdev(uint32(major), uint32(minor)), nil
}
// parseAddress just converts a hex-string to a uintptr
func parseAddress(s string) (uintptr, error) {
a, err := strconv.ParseUint(s, 16, 0)
if err != nil {
return 0, err
}
return uintptr(a), nil
}
// parseAddresses parses the start-end address
func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-")
if len(toks) < 2 {
return 0, 0, fmt.Errorf("invalid address")
}
saddr, err := parseAddress(toks[0])
if err != nil {
return 0, 0, err
}
eaddr, err := parseAddress(toks[1])
if err != nil {
return 0, 0, err
}
return saddr, eaddr, nil
}
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
return nil, fmt.Errorf("invalid permissions token")
}
perms := ProcMapPermissions{}
for _, ch := range s {
switch ch {
case 'r':
perms.Read = true
case 'w':
perms.Write = true
case 'x':
perms.Execute = true
case 'p':
perms.Private = true
case 's':
perms.Shared = true
}
}
return &perms, nil
}
// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
// buffer.
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
return nil, fmt.Errorf("truncated procmap entry")
}
saddr, eaddr, err := parseAddresses(fields[0])
if err != nil {
return nil, err
}
perms, err := parsePermissions(fields[1])
if err != nil {
return nil, err
}
offset, err := strconv.ParseInt(fields[2], 16, 0)
if err != nil {
return nil, err
}
device, err := parseDevice(fields[3])
if err != nil {
return nil, err
}
inode, err := strconv.ParseUint(fields[4], 10, 0)
if err != nil {
return nil, err
}
pathname := ""
if len(fields) >= 5 {
pathname = strings.Join(fields[5:], " ")
}
return &ProcMap{
StartAddr: saddr,
EndAddr: eaddr,
Perms: perms,
Offset: offset,
Dev: device,
Inode: inode,
Pathname: pathname,
}, nil
}
// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
// process.
func (p Proc) ProcMaps() ([]*ProcMap, error) {
file, err := os.Open(p.path("maps"))
if err != nil {
return nil, err
}
defer file.Close()
maps := []*ProcMap{}
scan := bufio.NewScanner(file)
for scan.Scan() {
m, err := parseProcMap(scan.Text())
if err != nil {
return nil, err
}
maps = append(maps, m)
}
return maps, nil
}
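A possible use of ProcMaps (illustrative only), filtering for executable file-backed mappings:
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Illustrative sketch, not part of the vendored code.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}
	// Print the executable file-backed mappings of this process.
	for _, m := range maps {
		if m.Perms != nil && m.Perms.Execute && m.Pathname != "" {
			fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
		}
	}
}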

View file

@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
}
ns := make(Namespaces, len(names))
@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}

View file

@ -24,11 +24,13 @@ package procfs
// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/prometheus/procfs/internal/util"
)
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
@ -55,24 +57,21 @@ type PSIStats struct {
// resource from /proc/pressure/<resource>. At time of writing this can be
// either "cpu", "memory" or "io".
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
}
defer file.Close()
return parsePSIStats(resource, file)
return parsePSIStats(resource, bytes.NewReader(data))
}
// parsePSIStats parses the specified file for pressure stall information
func parsePSIStats(resource string, file io.Reader) (PSIStats, error) {
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
stats, err := ioutil.ReadAll(file)
if err != nil {
return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource)
}
for _, l := range strings.Split(string(stats), "\n") {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
l := scanner.Text()
prefix := strings.Split(l, " ")[0]
switch prefix {
case "some":

165
vendor/github.com/prometheus/procfs/proc_smaps.go generated vendored Normal file
View file

@ -0,0 +1,165 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"bufio"
"errors"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
// match the header line before each mapped zone in /proc/pid/smaps
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
type ProcSMapsRollup struct {
// Amount of the mapping that is currently resident in RAM
Rss uint64
// Process's proportional share of this mapping
Pss uint64
// Size in bytes of clean shared pages
SharedClean uint64
// Size in bytes of dirty shared pages
SharedDirty uint64
// Size in bytes of clean private pages
PrivateClean uint64
// Size in bytes of dirty private pages
PrivateDirty uint64
// Amount of memory currently marked as referenced or accessed
Referenced uint64
// Amount of memory that does not belong to any file
Anonymous uint64
// Amount of would-be-anonymous memory currently on swap
Swap uint64
// Process's proportional memory on swap
SwapPss uint64
}
// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
// process.
//
// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
// be read and summed.
func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
if err != nil && os.IsNotExist(err) {
return p.procSMapsRollupManual()
}
if err != nil {
return ProcSMapsRollup{}, err
}
lines := strings.Split(string(data), "\n")
smaps := ProcSMapsRollup{}
// skip the first line, which doesn't contain the information we need
lines = lines[1:]
for _, line := range lines {
if line == "" {
continue
}
if err := smaps.parseLine(line); err != nil {
return ProcSMapsRollup{}, err
}
}
return smaps, nil
}
// Read /proc/pid/smaps and do the roll-up in Go code.
func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
file, err := os.Open(p.path("smaps"))
if err != nil {
return ProcSMapsRollup{}, err
}
defer file.Close()
smaps := ProcSMapsRollup{}
scan := bufio.NewScanner(file)
for scan.Scan() {
line := scan.Text()
if procSMapsHeaderLine.MatchString(line) {
continue
}
if err := smaps.parseLine(line); err != nil {
return ProcSMapsRollup{}, err
}
}
return smaps, nil
}
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
return fmt.Errorf("invalid smaps line %q: missing colon", line)
}
k := kv[0]
if k == "VmFlags" {
return nil
}
v := strings.TrimSpace(kv[1])
v = strings.TrimRight(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return err
}
vBytes := vKBytes * 1024
s.addValue(k, v, vKBytes, vBytes)
return nil
}
func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes
case "Pss":
s.Pss += vUintBytes
case "Shared_Clean":
s.SharedClean += vUintBytes
case "Shared_Dirty":
s.SharedDirty += vUintBytes
case "Private_Clean":
s.PrivateClean += vUintBytes
case "Private_Dirty":
s.PrivateDirty += vUintBytes
case "Referenced":
s.Referenced += vUintBytes
case "Anonymous":
s.Anonymous += vUintBytes
case "Swap":
s.Swap += vUintBytes
case "SwapPss":
s.SwapPss += vUintBytes
}
}

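As an illustrative aside, not part of this commit, the new ProcSMapsRollup API could be exercised like this (procfs.Self comes from the same package):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // Proc for the current process
	if err != nil {
		log.Fatal(err)
	}
	// Falls back to summing /proc/self/smaps on kernels older than 4.15.
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rss=%d pss=%d swap=%d bytes\n", rollup.Rss, rollup.Pss, rollup.Swap)
}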
View file

@ -16,10 +16,10 @@ package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
@ -100,6 +100,15 @@ type ProcStat struct {
VSize uint
// Resident set size in pages.
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
// Scheduling policy.
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
proc fs.FS
}
@ -113,19 +122,14 @@ func (p Proc) NewStat() (ProcStat, error) {
// Stat returns the current status information of the process.
func (p Proc) Stat() (ProcStat, error) {
f, err := os.Open(p.path("stat"))
if err != nil {
return ProcStat{}, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("stat"))
if err != nil {
return ProcStat{}, err
}
var (
ignore int
ignoreInt64 int64
ignoreUint64 uint64
s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("("))
@ -133,10 +137,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
return ProcStat{}, fmt.Errorf(
"unexpected format, couldn't extract comm: %s",
data,
)
return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
}
s.Comm = string(data[l+1 : r])
@ -160,10 +161,28 @@ func (p Proc) Stat() (ProcStat, error) {
&s.Priority,
&s.Nice,
&s.NumThreads,
&ignore,
&ignoreInt64,
&s.Starttime,
&s.VSize,
&s.RSS,
&s.RSSLimit,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
&ignoreInt64,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
)
if err != nil {
return ProcStat{}, err

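The ProcStat hunk above adds RSSLimit, RTPriority, Policy, and DelayAcctBlkIOTicks. An illustrative reader, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	s, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("comm=%s rsslimit=%d rtprio=%d policy=%d blkio_ticks=%d\n",
		s.Comm, s.RSSLimit, s.RTPriority, s.Policy, s.DelayAcctBlkIOTicks)
}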
View file

@ -15,13 +15,13 @@ package procfs
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ProcStat provides status information about the process,
// ProcStatus provides status information about the process,
// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
@ -29,38 +29,41 @@ type ProcStatus struct {
// The process name.
Name string
// Thread group ID.
TGID int
// Peak virtual memory size.
VmPeak uint64
VmPeak uint64 // nolint:golint
// Virtual memory size.
VmSize uint64
VmSize uint64 // nolint:golint
// Locked memory size.
VmLck uint64
VmLck uint64 // nolint:golint
// Pinned memory size.
VmPin uint64
VmPin uint64 // nolint:golint
// Peak resident set size.
VmHWM uint64
VmHWM uint64 // nolint:golint
// Resident set size (sum of RssAnon, RssFile, and RssShmem).
VmRSS uint64
VmRSS uint64 // nolint:golint
// Size of resident anonymous memory.
RssAnon uint64
RssAnon uint64 // nolint:golint
// Size of resident file mappings.
RssFile uint64
RssFile uint64 // nolint:golint
// Size of resident shared memory.
RssShmem uint64
RssShmem uint64 // nolint:golint
// Size of data segments.
VmData uint64
VmData uint64 // nolint:golint
// Size of stack segments.
VmStk uint64
VmStk uint64 // nolint:golint
// Size of text segments.
VmExe uint64
VmExe uint64 // nolint:golint
// Shared library code size.
VmLib uint64
VmLib uint64 // nolint:golint
// Page table entries size.
VmPTE uint64
VmPTE uint64 // nolint:golint
// Size of second-level page tables.
VmPMD uint64
VmPMD uint64 // nolint:golint
// Swapped-out virtual memory size by anonymous private.
VmSwap uint64
VmSwap uint64 // nolint:golint
// Size of hugetlb memory portions.
HugetlbPages uint64
@ -68,17 +71,16 @@ type ProcStatus struct {
VoluntaryCtxtSwitches uint64
// Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
}
// NewStatus returns the current status information of the process.
func (p Proc) NewStatus() (ProcStatus, error) {
f, err := os.Open(p.path("status"))
if err != nil {
return ProcStatus{}, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
data, err := util.ReadFileNoStat(p.path("status"))
if err != nil {
return ProcStatus{}, err
}
@ -113,8 +115,14 @@ func (p Proc) NewStatus() (ProcStatus, error) {
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":

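With the new TGID, UIDs, and GIDs fields, a status lookup might look like the following illustrative sketch (not part of this commit):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("name=%s tgid=%d uids=%v vmrss=%d\n",
		status.Name, status.TGID, status.UIDs, status.VmRSS)
}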
vendor/github.com/prometheus/procfs/schedstat.go generated vendored Normal file
View file

@ -0,0 +1,121 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"os"
"regexp"
"strconv"
)
var (
cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
)
// Schedstat contains scheduler statistics from /proc/schedstat
//
// See
// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
// for a detailed description of what these numbers mean.
//
// Note the current kernel documentation claims some of the time units are in
// jiffies when they are actually in nanoseconds since 2.6.23 with the
// introduction of CFS. A fix to the documentation is pending. See
// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
type Schedstat struct {
CPUs []*SchedstatCPU
}
// SchedstatCPU contains the values from one "cpu<N>" line
type SchedstatCPU struct {
CPUNum string
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
// ProcSchedstat contains the values from /proc/<pid>/schedstat
type ProcSchedstat struct {
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
// Schedstat reads data from /proc/schedstat
func (fs FS) Schedstat() (*Schedstat, error) {
file, err := os.Open(fs.proc.Path("schedstat"))
if err != nil {
return nil, err
}
defer file.Close()
stats := &Schedstat{}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := cpuLineRE.FindStringSubmatch(scanner.Text())
if match != nil {
cpu := &SchedstatCPU{}
cpu.CPUNum = match[1]
cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
if err != nil {
continue
}
cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
if err != nil {
continue
}
cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
if err != nil {
continue
}
stats.CPUs = append(stats.CPUs, cpu)
}
}
return stats, nil
}
func parseProcSchedstat(contents string) (ProcSchedstat, error) {
var (
stats ProcSchedstat
err error
)
match := procLineRE.FindStringSubmatch(contents)
if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil {
return stats, err
}
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil {
return stats, err
}
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
return stats, err
}
return stats, errors.New("could not parse schedstat")
}

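An illustrative consumer of the new Schedstat API (not part of this commit; the values are nanoseconds, per the comment above):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range stats.CPUs {
		fmt.Printf("cpu%s running=%dns waiting=%dns timeslices=%d\n",
			c.CPUNum, c.RunningNanoseconds, c.WaitingNanoseconds, c.RunTimeslices)
	}
}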
vendor/github.com/prometheus/procfs/slab.go generated vendored Normal file
View file

@ -0,0 +1,151 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
slabSpace = regexp.MustCompile(`\s+`)
slabVer = regexp.MustCompile(`slabinfo -`)
slabHeader = regexp.MustCompile(`# name`)
)
// Slab represents a slab pool in the kernel.
type Slab struct {
Name string
ObjActive int64
ObjNum int64
ObjSize int64
ObjPerSlab int64
PagesPerSlab int64
// tunables
Limit int64
Batch int64
SharedFactor int64
SlabActive int64
SlabNum int64
SharedAvail int64
}
// SlabInfo represents info for all slabs.
type SlabInfo struct {
Slabs []*Slab
}
func shouldParseSlab(line string) bool {
if slabVer.MatchString(line) {
return false
}
if slabHeader.MatchString(line) {
return false
}
return true
}
// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
func parseV21SlabEntry(line string) (*Slab, error) {
// First cleanup whitespace.
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
return nil, fmt.Errorf("unable to parse: %q", line)
}
var err error
i := &Slab{Name: s[0]}
i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
if err != nil {
return nil, err
}
i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
if err != nil {
return nil, err
}
i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
if err != nil {
return nil, err
}
i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
if err != nil {
return nil, err
}
i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
if err != nil {
return nil, err
}
i.Limit, err = strconv.ParseInt(s[8], 10, 64)
if err != nil {
return nil, err
}
i.Batch, err = strconv.ParseInt(s[9], 10, 64)
if err != nil {
return nil, err
}
i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
if err != nil {
return nil, err
}
i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
if err != nil {
return nil, err
}
i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
if err != nil {
return nil, err
}
i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
if err != nil {
return nil, err
}
return i, nil
}
// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
scanner := bufio.NewScanner(r)
s := SlabInfo{Slabs: []*Slab{}}
for scanner.Scan() {
line := scanner.Text()
if !shouldParseSlab(line) {
continue
}
slab, err := parseV21SlabEntry(line)
if err != nil {
return s, err
}
s.Slabs = append(s.Slabs, slab)
}
return s, nil
}
// SlabInfo reads data from /proc/slabinfo
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
// kernel 2.6.10.
data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
if err != nil {
return SlabInfo{}, err
}
return parseSlabInfo21(bytes.NewReader(data))
}

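An illustrative use of the new SlabInfo API (not part of this commit; /proc/slabinfo is typically readable only by root):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.SlabInfo() // usually needs elevated privileges
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range info.Slabs {
		fmt.Printf("%s active=%d num=%d objsize=%d\n", s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}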
View file

@ -15,13 +15,14 @@ package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
// CPUStat shows how much time the CPU spent in various stages.
@ -92,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
}
if count == 0 {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
}
cpuStat.User /= userHZ
@ -115,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
}
return cpuStat, cpuID, nil
@ -135,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
}
return softIRQStat, total, nil
@ -164,16 +165,15 @@ func (fs FS) NewStat() (Stat, error) {
// Stat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Stat() (Stat, error) {
f, err := os.Open(fs.proc.Path("stat"))
fileName := fs.proc.Path("stat")
data, err := util.ReadFileNoStat(fileName)
if err != nil {
return Stat{}, err
}
defer f.Close()
stat := Stat{}
scanner := bufio.NewScanner(f)
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@ -184,34 +184,34 @@ func (fs FS) Stat() (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) {
}
if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
}
return stat, nil

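An illustrative call of FS.Stat after the ReadFileNoStat change above (not part of this commit):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	st, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("btime=%d ctxt=%d procs_running=%d\n",
		st.BootTime, st.ContextSwitches, st.ProcessesRunning)
}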
vendor/github.com/prometheus/procfs/swaps.go generated vendored Normal file
View file

@ -0,0 +1,89 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Swap represents an entry in /proc/swaps.
type Swap struct {
Filename string
Type string
Size int
Used int
Priority int
}
// Swaps returns a slice of all configured swap devices on the system.
func (fs FS) Swaps() ([]*Swap, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
if err != nil {
return nil, err
}
return parseSwaps(data)
}
func parseSwaps(info []byte) ([]*Swap, error) {
swaps := []*Swap{}
scanner := bufio.NewScanner(bytes.NewReader(info))
scanner.Scan() // ignore header line
for scanner.Scan() {
swapString := scanner.Text()
parsedSwap, err := parseSwapString(swapString)
if err != nil {
return nil, err
}
swaps = append(swaps, parsedSwap)
}
err := scanner.Err()
return swaps, err
}
func parseSwapString(swapString string) (*Swap, error) {
var err error
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
}
swap := &Swap{
Filename: swapFields[0],
Type: swapFields[1],
}
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
}
return swap, nil
}

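An illustrative use of the new Swaps API (not part of this commit; Size and Used are reported as in /proc/swaps):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		fmt.Printf("%s (%s): size=%d used=%d prio=%d\n",
			s.Filename, s.Type, s.Size, s.Used, s.Priority)
	}
}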
vendor/github.com/prometheus/procfs/vm.go generated vendored Normal file
View file

@ -0,0 +1,210 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// The VM interface is described at
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio,
// which holds an array, and numa_zonelist_order (deprecated), which is a string.
type VM struct {
AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
BlockDump *int64 // /proc/sys/vm/block_dump
CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
DropCaches *int64 // /proc/sys/vm/drop_caches
ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
LaptopMode *int64 // /proc/sys/vm/laptop_mode
LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
MaxMapCount *int64 // /proc/sys/vm/max_map_count
MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
NrHugepages *int64 // /proc/sys/vm/nr_hugepages
NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
NumaStat *int64 // /proc/sys/vm/numa_stat
NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
PageCluster *int64 // /proc/sys/vm/page-cluster
PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
StatInterval *int64 // /proc/sys/vm/stat_interval
Swappiness *int64 // /proc/sys/vm/swappiness
UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
}
// VM reads the VM statistics from the specified `proc` filesystem.
func (fs FS) VM() (*VM, error) {
path := fs.proc.Path("sys/vm")
file, err := os.Stat(path)
if err != nil {
return nil, err
}
if !file.Mode().IsDir() {
return nil, fmt.Errorf("%s is not a directory", path)
}
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
var vm VM
for _, f := range files {
if f.IsDir() {
continue
}
name := filepath.Join(path, f.Name())
// Ignore errors on read, as some files
// in /proc/sys/vm are write-only.
value, err := util.SysReadFile(name)
if err != nil {
continue
}
vp := util.NewValueParser(value)
switch f.Name() {
case "admin_reserve_kbytes":
vm.AdminReserveKbytes = vp.PInt64()
case "block_dump":
vm.BlockDump = vp.PInt64()
case "compact_unevictable_allowed":
vm.CompactUnevictableAllowed = vp.PInt64()
case "dirty_background_bytes":
vm.DirtyBackgroundBytes = vp.PInt64()
case "dirty_background_ratio":
vm.DirtyBackgroundRatio = vp.PInt64()
case "dirty_bytes":
vm.DirtyBytes = vp.PInt64()
case "dirty_expire_centisecs":
vm.DirtyExpireCentisecs = vp.PInt64()
case "dirty_ratio":
vm.DirtyRatio = vp.PInt64()
case "dirtytime_expire_seconds":
vm.DirtytimeExpireSeconds = vp.PInt64()
case "dirty_writeback_centisecs":
vm.DirtyWritebackCentisecs = vp.PInt64()
case "drop_caches":
vm.DropCaches = vp.PInt64()
case "extfrag_threshold":
vm.ExtfragThreshold = vp.PInt64()
case "hugetlb_shm_group":
vm.HugetlbShmGroup = vp.PInt64()
case "laptop_mode":
vm.LaptopMode = vp.PInt64()
case "legacy_va_layout":
vm.LegacyVaLayout = vp.PInt64()
case "lowmem_reserve_ratio":
stringSlice := strings.Fields(value)
pint64Slice := make([]*int64, 0, len(stringSlice))
for _, value := range stringSlice {
vp := util.NewValueParser(value)
pint64Slice = append(pint64Slice, vp.PInt64())
}
vm.LowmemReserveRatio = pint64Slice
case "max_map_count":
vm.MaxMapCount = vp.PInt64()
case "memory_failure_early_kill":
vm.MemoryFailureEarlyKill = vp.PInt64()
case "memory_failure_recovery":
vm.MemoryFailureRecovery = vp.PInt64()
case "min_free_kbytes":
vm.MinFreeKbytes = vp.PInt64()
case "min_slab_ratio":
vm.MinSlabRatio = vp.PInt64()
case "min_unmapped_ratio":
vm.MinUnmappedRatio = vp.PInt64()
case "mmap_min_addr":
vm.MmapMinAddr = vp.PInt64()
case "nr_hugepages":
vm.NrHugepages = vp.PInt64()
case "nr_hugepages_mempolicy":
vm.NrHugepagesMempolicy = vp.PInt64()
case "nr_overcommit_hugepages":
vm.NrOvercommitHugepages = vp.PInt64()
case "numa_stat":
vm.NumaStat = vp.PInt64()
case "numa_zonelist_order":
vm.NumaZonelistOrder = value
case "oom_dump_tasks":
vm.OomDumpTasks = vp.PInt64()
case "oom_kill_allocating_task":
vm.OomKillAllocatingTask = vp.PInt64()
case "overcommit_kbytes":
vm.OvercommitKbytes = vp.PInt64()
case "overcommit_memory":
vm.OvercommitMemory = vp.PInt64()
case "overcommit_ratio":
vm.OvercommitRatio = vp.PInt64()
case "page-cluster":
vm.PageCluster = vp.PInt64()
case "panic_on_oom":
vm.PanicOnOom = vp.PInt64()
case "percpu_pagelist_fraction":
vm.PercpuPagelistFraction = vp.PInt64()
case "stat_interval":
vm.StatInterval = vp.PInt64()
case "swappiness":
vm.Swappiness = vp.PInt64()
case "user_reserve_kbytes":
vm.UserReserveKbytes = vp.PInt64()
case "vfs_cache_pressure":
vm.VfsCachePressure = vp.PInt64()
case "watermark_boost_factor":
vm.WatermarkBoostFactor = vp.PInt64()
case "watermark_scale_factor":
vm.WatermarkScaleFactor = vp.PInt64()
case "zone_reclaim_mode":
vm.ZoneReclaimMode = vp.PInt64()
}
if err := vp.Err(); err != nil {
return nil, err
}
}
return &vm, nil
}

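An illustrative read of the new VM API (not part of this commit). Most fields are *int64 and stay nil when the underlying file is absent or unreadable:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil {
		fmt.Println("swappiness:", *vm.Swappiness)
	}
	fmt.Println("numa_zonelist_order:", vm.NumaZonelistOrder)
}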
Some files were not shown because too many files have changed in this diff.