update deps (#686)

Miek Gieben 2017-06-01 18:11:50 +01:00 committed by GitHub
parent 30ecb83dce
commit 3752a97135
6538 changed files with 9 additions and 2217448 deletions


@@ -1,345 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides bindings to the Prometheus HTTP API:
// http://prometheus.io/docs/querying/api/
package prometheus
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
statusAPIError = 422
apiPrefix = "/api/v1"
epQuery = "/query"
epQueryRange = "/query_range"
epLabelValues = "/label/:name/values"
epSeries = "/series"
)
type ErrorType string
const (
// The different API error types.
ErrBadData ErrorType = "bad_data"
ErrTimeout ErrorType = "timeout"
ErrCanceled ErrorType = "canceled"
ErrExec ErrorType = "execution"
ErrBadResponse ErrorType = "bad_response"
)
// Error is an error returned by the API.
type Error struct {
Type ErrorType
Msg string
}
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
}
// CancelableTransport is like http.Transport but provides
// per-request cancelation functionality.
type CancelableTransport interface {
http.RoundTripper
CancelRequest(req *http.Request)
}
var DefaultTransport CancelableTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
// Config defines configuration parameters for a new client.
type Config struct {
// The address of the Prometheus to connect to.
Address string
// Transport is used by the Client to drive HTTP requests. If not
// provided, DefaultTransport will be used.
Transport CancelableTransport
}
func (cfg *Config) transport() CancelableTransport {
if cfg.Transport == nil {
return DefaultTransport
}
return cfg.Transport
}
type Client interface {
url(ep string, args map[string]string) *url.URL
do(context.Context, *http.Request) (*http.Response, []byte, error)
}
// New returns a new Client.
//
// It is safe to use the returned Client from multiple goroutines.
func New(cfg Config) (Client, error) {
u, err := url.Parse(cfg.Address)
if err != nil {
return nil, err
}
u.Path = strings.TrimRight(u.Path, "/") + apiPrefix
return &httpClient{
endpoint: u,
transport: cfg.transport(),
}, nil
}
type httpClient struct {
endpoint *url.URL
transport CancelableTransport
}
func (c *httpClient) url(ep string, args map[string]string) *url.URL {
p := path.Join(c.endpoint.Path, ep)
for arg, val := range args {
arg = ":" + arg
p = strings.Replace(p, arg, val, -1)
}
u := *c.endpoint
u.Path = p
return &u
}
func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
close(done)
}()
select {
case <-ctx.Done():
err = resp.Body.Close()
<-done
if err == nil {
err = ctx.Err()
}
case <-done:
}
return resp, body, err
}
// apiClient wraps a regular client and processes successful API responses.
// Successful also includes responses that errored at the API level.
type apiClient struct {
Client
}
type apiResponse struct {
Status string `json:"status"`
Data json.RawMessage `json:"data"`
ErrorType ErrorType `json:"errorType"`
Error string `json:"error"`
}
func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
resp, body, err := c.Client.do(ctx, req)
if err != nil {
return resp, body, err
}
code := resp.StatusCode
if code/100 != 2 && code != statusAPIError {
return resp, body, &Error{
Type: ErrBadResponse,
Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
}
}
var result apiResponse
if err = json.Unmarshal(body, &result); err != nil {
return resp, body, &Error{
Type: ErrBadResponse,
Msg: err.Error(),
}
}
if (code == statusAPIError) != (result.Status == "error") {
err = &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
}
}
if code == statusAPIError && result.Status == "error" {
err = &Error{
Type: result.ErrorType,
Msg: result.Error,
}
}
return resp, []byte(result.Data), err
}
// Range represents a sliced time range.
type Range struct {
// The boundaries of the time range.
Start, End time.Time
// The maximum time between two slices within the boundaries.
Step time.Duration
}
// queryResult contains result data for a query.
type queryResult struct {
Type model.ValueType `json:"resultType"`
Result interface{} `json:"result"`
// The decoded value.
v model.Value
}
func (qr *queryResult) UnmarshalJSON(b []byte) error {
v := struct {
Type model.ValueType `json:"resultType"`
Result json.RawMessage `json:"result"`
}{}
err := json.Unmarshal(b, &v)
if err != nil {
return err
}
switch v.Type {
case model.ValScalar:
var sv model.Scalar
err = json.Unmarshal(v.Result, &sv)
qr.v = &sv
case model.ValVector:
var vv model.Vector
err = json.Unmarshal(v.Result, &vv)
qr.v = vv
case model.ValMatrix:
var mv model.Matrix
err = json.Unmarshal(v.Result, &mv)
qr.v = mv
default:
err = fmt.Errorf("unexpected value type %q", v.Type)
}
return err
}
// QueryAPI provides bindings for Prometheus's query API.
type QueryAPI interface {
// Query performs a query for the given time.
Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
// QueryRange performs a query for the given range.
QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
}
// NewQueryAPI returns a new QueryAPI for the client.
//
// It is safe to use the returned QueryAPI from multiple goroutines.
func NewQueryAPI(c Client) QueryAPI {
return &httpQueryAPI{client: apiClient{c}}
}
type httpQueryAPI struct {
client Client
}
func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
u := h.client.url(epQuery, nil)
q := u.Query()
q.Set("query", query)
q.Set("time", ts.Format(time.RFC3339Nano))
u.RawQuery = q.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
_, body, err := h.client.do(ctx, req)
if err != nil {
return nil, err
}
var qres queryResult
err = json.Unmarshal(body, &qres)
return model.Value(qres.v), err
}
func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
u := h.client.url(epQueryRange, nil)
q := u.Query()
var (
start = r.Start.Format(time.RFC3339Nano)
end = r.End.Format(time.RFC3339Nano)
step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
)
q.Set("query", query)
q.Set("start", start)
q.Set("end", end)
q.Set("step", step)
u.RawQuery = q.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
_, body, err := h.client.do(ctx, req)
if err != nil {
return nil, err
}
var qres queryResult
err = json.Unmarshal(body, &qres)
return model.Value(qres.v), err
}
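For orientation, a minimal sketch of how this client package was typically used (the import path, server address, and PromQL expressions are illustrative assumptions, not taken from this diff):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api/prometheus"
	"golang.org/x/net/context"
)

func main() {
	// Assumed address of a reachable Prometheus server.
	client, err := prometheus.New(prometheus.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	qapi := prometheus.NewQueryAPI(client)

	// Instant query evaluated at the current time.
	val, err := qapi.Query(context.Background(), "up", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(val)

	// Range query over the last hour at one-minute resolution.
	r := prometheus.Range{
		Start: time.Now().Add(-time.Hour),
		End:   time.Now(),
		Step:  time.Minute,
	}
	val, err = qapi.QueryRange(context.Background(), "rate(prometheus_http_requests_total[5m])", r)
	if err != nil {
		panic(err)
	}
	fmt.Println(val)
}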


@@ -1,453 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"reflect"
"testing"
"time"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
)
func TestConfig(t *testing.T) {
c := Config{}
if c.transport() != DefaultTransport {
t.Fatalf("expected default transport for nil Transport field")
}
}
func TestClientURL(t *testing.T) {
tests := []struct {
address string
endpoint string
args map[string]string
expected string
}{
{
address: "http://localhost:9090",
endpoint: "/test",
expected: "http://localhost:9090/test",
},
{
address: "http://localhost",
endpoint: "/test",
expected: "http://localhost/test",
},
{
address: "http://localhost:9090",
endpoint: "test",
expected: "http://localhost:9090/test",
},
{
address: "http://localhost:9090/prefix",
endpoint: "/test",
expected: "http://localhost:9090/prefix/test",
},
{
address: "https://localhost:9090/",
endpoint: "/test/",
expected: "https://localhost:9090/test",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param",
args: map[string]string{
"param": "content",
},
expected: "http://localhost:9090/test/content",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param/more/:param",
args: map[string]string{
"param": "content",
},
expected: "http://localhost:9090/test/content/more/content",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param/more/:foo",
args: map[string]string{
"param": "content",
"foo": "bar",
},
expected: "http://localhost:9090/test/content/more/bar",
},
{
address: "http://localhost:9090",
endpoint: "/test/:param",
args: map[string]string{
"nonexistant": "content",
},
expected: "http://localhost:9090/test/:param",
},
}
for _, test := range tests {
ep, err := url.Parse(test.address)
if err != nil {
t.Fatal(err)
}
hclient := &httpClient{
endpoint: ep,
transport: DefaultTransport,
}
u := hclient.url(test.endpoint, test.args)
if u.String() != test.expected {
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
continue
}
// The apiClient must return exactly the same result as the httpClient.
aclient := &apiClient{hclient}
u = aclient.url(test.endpoint, test.args)
if u.String() != test.expected {
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
}
}
}
type testClient struct {
*testing.T
ch chan apiClientTest
req *http.Request
}
type apiClientTest struct {
code int
response interface{}
expected string
err *Error
}
func (c *testClient) url(ep string, args map[string]string) *url.URL {
return nil
}
func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
if ctx == nil {
c.Fatalf("context was not passed down")
}
if req != c.req {
c.Fatalf("request was not passed down")
}
test := <-c.ch
var b []byte
var err error
switch v := test.response.(type) {
case string:
b = []byte(v)
default:
b, err = json.Marshal(v)
if err != nil {
c.Fatal(err)
}
}
resp := &http.Response{
StatusCode: test.code,
}
return resp, b, nil
}
func TestAPIClientDo(t *testing.T) {
tests := []apiClientTest{
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`null`),
ErrorType: ErrBadData,
Error: "failed",
},
err: &Error{
Type: ErrBadData,
Msg: "failed",
},
code: statusAPIError,
expected: `null`,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrTimeout,
Msg: "timed out",
},
code: statusAPIError,
expected: `test`,
},
{
response: "bad json",
err: &Error{
Type: ErrBadResponse,
Msg: "bad response code 400",
},
code: http.StatusBadRequest,
},
{
response: "bad json",
err: &Error{
Type: ErrBadResponse,
Msg: "invalid character 'b' looking for beginning of value",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "success",
Data: json.RawMessage(`"test"`),
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "success",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: statusAPIError,
},
{
response: &apiResponse{
Status: "error",
Data: json.RawMessage(`"test"`),
ErrorType: ErrTimeout,
Error: "timed out",
},
err: &Error{
Type: ErrBadResponse,
Msg: "inconsistent body for response code",
},
code: http.StatusOK,
},
}
tc := &testClient{
T: t,
ch: make(chan apiClientTest, 1),
req: &http.Request{},
}
client := &apiClient{tc}
for _, test := range tests {
tc.ch <- test
_, body, err := client.do(context.Background(), tc.req)
if test.err != nil {
if err == nil {
t.Errorf("expected error %q but got none", test.err)
continue
}
if test.err.Error() != err.Error() {
t.Errorf("unexpected error: want %q, got %q", test.err, err)
}
continue
}
if err != nil {
t.Errorf("unexpeceted error %s", err)
continue
}
want, got := test.expected, string(body)
if want != got {
t.Errorf("unexpected body: want %q, got %q", want, got)
}
}
}
type apiTestClient struct {
*testing.T
curTest apiTest
}
type apiTest struct {
do func() (interface{}, error)
inErr error
inRes interface{}
reqPath string
reqParam url.Values
reqMethod string
res interface{}
err error
}
func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
u := &url.URL{
Host: "test:9090",
Path: apiPrefix + ep,
}
return u
}
func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
test := c.curTest
if req.URL.Path != test.reqPath {
c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
}
if req.Method != test.reqMethod {
c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
}
b, err := json.Marshal(test.inRes)
if err != nil {
c.Fatal(err)
}
resp := &http.Response{}
if test.inErr != nil {
resp.StatusCode = statusAPIError
} else {
resp.StatusCode = http.StatusOK
}
return resp, b, test.inErr
}
func TestAPIs(t *testing.T) {
testTime := time.Now()
client := &apiTestClient{T: t}
queryApi := &httpQueryAPI{
client: client,
}
doQuery := func(q string, ts time.Time) func() (interface{}, error) {
return func() (interface{}, error) {
return queryApi.Query(context.Background(), q, ts)
}
}
doQueryRange := func(q string, rng Range) func() (interface{}, error) {
return func() (interface{}, error) {
return queryApi.QueryRange(context.Background(), q, rng)
}
}
queryTests := []apiTest{
{
do: doQuery("2", testTime),
inRes: &queryResult{
Type: model.ValScalar,
Result: &model.Scalar{
Value: 2,
Timestamp: model.TimeFromUnix(testTime.Unix()),
},
},
reqMethod: "GET",
reqPath: "/api/v1/query",
reqParam: url.Values{
"query": []string{"2"},
"time": []string{testTime.Format(time.RFC3339Nano)},
},
res: &model.Scalar{
Value: 2,
Timestamp: model.TimeFromUnix(testTime.Unix()),
},
},
{
do: doQuery("2", testTime),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/query",
reqParam: url.Values{
"query": []string{"2"},
"time": []string{testTime.Format(time.RFC3339Nano)},
},
err: fmt.Errorf("some error"),
},
{
do: doQueryRange("2", Range{
Start: testTime.Add(-time.Minute),
End: testTime,
Step: time.Minute,
}),
inErr: fmt.Errorf("some error"),
reqMethod: "GET",
reqPath: "/api/v1/query_range",
reqParam: url.Values{
"query": []string{"2"},
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
"end": []string{testTime.Format(time.RFC3339Nano)},
"step": []string{time.Minute.String()},
},
err: fmt.Errorf("some error"),
},
}
var tests []apiTest
tests = append(tests, queryTests...)
for _, test := range tests {
client.curTest = test
res, err := test.do()
if test.err != nil {
if err == nil {
t.Errorf("expected error %q but got none", test.err)
continue
}
if err.Error() != test.err.Error() {
t.Errorf("unexpected error: want %s, got %s", test.err, err)
}
continue
}
if err != nil {
t.Errorf("unexpected error: %s", err)
continue
}
if !reflect.DeepEqual(res, test.res) {
t.Errorf("unexpected result: want %v, got %v", test.res, res)
}
}
}


@@ -1,103 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A simple example exposing fictional RPC latencies with different types of
// random distributions (uniform, normal, and exponential) as Prometheus
// metrics.
package main
import (
"flag"
"math"
"math/rand"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
)
var (
addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
uniformDomain = flag.Float64("uniform.domain", 200, "The domain for the uniform distribution.")
normDomain = flag.Float64("normal.domain", 200, "The domain for the normal distribution.")
normMean = flag.Float64("normal.mean", 10, "The mean for the normal distribution.")
oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
)
var (
// Create a summary to track fictional interservice RPC latencies for three
// distinct services with different latency distributions. These services are
// differentiated via a "service" label.
rpcDurations = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "rpc_durations_microseconds",
Help: "RPC latency distributions.",
},
[]string{"service"},
)
// The same as above, but now as a histogram, and only for the normal
// distribution. The buckets are targeted to the parameters of the
// normal distribution, with 20 buckets centered on the mean, each
// half-sigma wide.
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "rpc_durations_histogram_microseconds",
Help: "RPC latency distributions.",
Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
})
)
func init() {
// Register the summary and the histogram with Prometheus's default registry.
prometheus.MustRegister(rpcDurations)
prometheus.MustRegister(rpcDurationsHistogram)
}
func main() {
flag.Parse()
start := time.Now()
oscillationFactor := func() float64 {
return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod)))
}
// Periodically record some sample latencies for the three services.
go func() {
for {
v := rand.Float64() * *uniformDomain
rpcDurations.WithLabelValues("uniform").Observe(v)
time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond)
}
}()
go func() {
for {
v := (rand.NormFloat64() * *normDomain) + *normMean
rpcDurations.WithLabelValues("normal").Observe(v)
rpcDurationsHistogram.Observe(v)
time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
}
}()
go func() {
for {
v := rand.ExpFloat64()
rpcDurations.WithLabelValues("exponential").Observe(v)
time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
}
}()
// Expose the registered metrics via HTTP.
http.Handle("/metrics", prometheus.Handler())
http.ListenAndServe(*addr, nil)
}


@@ -1,30 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A minimal example of how to include Prometheus instrumentation.
package main
import (
"flag"
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
func main() {
flag.Parse()
http.Handle("/metrics", prometheus.Handler())
http.ListenAndServe(*addr, nil)
}


@@ -1,201 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// Package promhttp contains functions to create http.Handler instances to
// expose Prometheus metrics via HTTP. In later versions of this package, it
// will also contain tooling to instrument instances of http.Handler and
// http.RoundTripper.
//
// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
// you can create a handler for a custom registry or anything that implements
// the Gatherer interface. It also allows creating handlers that act
// differently on errors or that log errors.
package promhttp
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"strings"
"sync"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
)
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
// error, no error logging, and compression if requested by the client.
//
// If you want to create a Handler for the DefaultGatherer with different
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
// your desired HandlerOpts.
func Handler() http.Handler {
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
}
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
// of the Handler is defined by the provided HandlerOpts.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
}
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
if len(mfs) == 0 {
http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
case HTTPErrorOnError:
http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding metric family:", err)
}
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
// TODO(beorn7): Consider streaming serving of metrics.
})
}
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
// last error message in the body. Only use this in deliberate "best
// effort" metrics collection scenarios. It is recommended to at least
// log errors (by providing an ErrorLog in HandlerOpts) to not mask
// errors completely.
ContinueOnError
// Panic upon the first error encountered (useful for "crash only" apps).
PanicOnError
)
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy for custom loggers to implement, if they don't do so already.
type Logger interface {
Println(v ...interface{})
}
// HandlerOpts specifies options for serving metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and
// serving metrics. If nil, errors are not logged at all.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling provided ErrorLog
// is not nil.
ErrorHandling HandlerErrorHandling
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
if compressionDisabled {
return writer, ""
}
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
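As a quick illustration of HandlerFor with a custom registry (a sketch; the registry contents, metric name, and listen address are assumptions):

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reqs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total requests handled.",
	})
	reg.MustRegister(reqs)

	// Log gather/encode errors, but keep serving whatever metrics could
	// still be collected (the best-effort mode described above).
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "metrics: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError,
	})
	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}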


@@ -1,137 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package promhttp
import (
"bytes"
"errors"
"log"
"net/http"
"net/http/httptest"
"testing"
"github.com/prometheus/client_golang/prometheus"
)
type errorCollector struct{}
func (e errorCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil)
}
func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.NewInvalidMetric(
prometheus.NewDesc("invalid_metric", "not helpful", nil, nil),
errors.New("collect error"),
)
}
func TestHandlerErrorHandling(t *testing.T) {
// Create a registry that collects a MetricFamily with two elements,
// another with one, and reports an error.
reg := prometheus.NewRegistry()
cnt := prometheus.NewCounter(prometheus.CounterOpts{
Name: "the_count",
Help: "Ah-ah-ah! Thunder and lightning!",
})
reg.MustRegister(cnt)
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg.MustRegister(cntVec)
reg.MustRegister(errorCollector{})
logBuf := &bytes.Buffer{}
logger := log.New(logBuf, "", 0)
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request.Header.Add("Accept", "text/plain")
errorHandler := HandlerFor(reg, HandlerOpts{
ErrorLog: logger,
ErrorHandling: HTTPErrorOnError,
})
continueHandler := HandlerFor(reg, HandlerOpts{
ErrorLog: logger,
ErrorHandling: ContinueOnError,
})
panicHandler := HandlerFor(reg, HandlerOpts{
ErrorLog: logger,
ErrorHandling: PanicOnError,
})
wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
`
wantErrorBody := `An error has occurred during metrics gathering:
error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
`
wantOKBody := `# HELP name docstring
# TYPE name counter
name{constname="constvalue",labelname="val1"} 1
name{constname="constvalue",labelname="val2"} 1
# HELP the_count Ah-ah-ah! Thunder and lightning!
# TYPE the_count counter
the_count 0
`
errorHandler.ServeHTTP(writer, request)
if got, want := writer.Code, http.StatusInternalServerError; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
if got := logBuf.String(); got != wantMsg {
t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
}
if got := writer.Body.String(); got != wantErrorBody {
t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
}
logBuf.Reset()
writer.Body.Reset()
writer.Code = http.StatusOK
continueHandler.ServeHTTP(writer, request)
if got, want := writer.Code, http.StatusOK; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
if got := logBuf.String(); got != wantMsg {
t.Errorf("got log message %q, want %q", got, wantMsg)
}
if got := writer.Body.String(); got != wantOKBody {
t.Errorf("got body %q, want %q", got, wantOKBody)
}
defer func() {
if err := recover(); err == nil {
t.Error("expected panic from panicHandler")
}
}()
panicHandler.ServeHTTP(writer, request)
}


@@ -1,56 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push_test
import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)
func ExampleCollectors() {
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last succesful completion of a DB backup.",
})
completionTime.Set(float64(time.Now().Unix()))
if err := push.Collectors(
"db_backup", push.HostnameGroupingKey(),
"http://pushgateway:9091",
completionTime,
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}
func ExampleRegistry() {
registry := prometheus.NewRegistry()
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last succesful completion of a DB backup.",
})
registry.MustRegister(completionTime)
completionTime.Set(float64(time.Now().Unix()))
if err := push.FromGatherer(
"db_backup", push.HostnameGroupingKey(),
"http://pushgateway:9091",
registry,
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}


@@ -1,172 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// Package push provides functions to push metrics to a Pushgateway. The metrics
// to push are either collected from a provided registry, or from explicitly
// listed collectors.
//
// See the documentation of the Pushgateway to understand the meaning of the
// grouping parameters and the differences between push.FromGatherer and
// push.Collectors on the one hand and push.AddFromGatherer and
// push.AddCollectors on the other hand: https://github.com/prometheus/pushgateway
package push
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/client_golang/prometheus"
)
const contentTypeHeader = "Content-Type"
// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the scheme in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "PUT")
}
// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "POST")
}
func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
}
if strings.Contains(job, "/") {
return fmt.Errorf("job contains '/': %s", job)
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
if !model.LabelNameRE.MatchString(ln) {
return fmt.Errorf("grouping label has invalid name: %s", ln)
}
if strings.Contains(lv, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
}
urlComponents = append(urlComponents, ln, lv)
}
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
mfs, err := g.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
}
return nil
}
// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}
// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}
func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}
// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}
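To round out the push API above, a sketch of AddFromGatherer with explicit grouping labels (the Pushgateway URL, job name, and metric are placeholders, not taken from this diff):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	reg := prometheus.NewRegistry()
	last := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_job_last_run_timestamp_seconds",
		Help: "Timestamp of the last batch job run.",
	})
	reg.MustRegister(last)
	last.Set(float64(time.Now().Unix()))

	// AddFromGatherer uses POST, so only metrics with the same name within
	// the group are replaced; FromGatherer (PUT) would replace the whole group.
	if err := push.AddFromGatherer(
		"batch_job",
		map[string]string{"instance": "worker-1"},
		"http://pushgateway:9091",
		reg,
	); err != nil {
		fmt.Println("Could not push to Pushgateway:", err)
	}
}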


@@ -1,176 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package push
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
)
func TestPush(t *testing.T) {
var (
lastMethod string
lastBody []byte
lastPath string
)
host, err := os.Hostname()
if err != nil {
t.Error(err)
}
// Fake a Pushgateway that always responds with 202.
pgwOK := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lastMethod = r.Method
var err error
lastBody, err = ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
lastPath = r.URL.EscapedPath()
w.Header().Set("Content-Type", `text/plain; charset=utf-8`)
w.WriteHeader(http.StatusAccepted)
}),
)
defer pgwOK.Close()
// Fake a Pushgateway that always responds with 500.
pgwErr := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "fake error", http.StatusInternalServerError)
}),
)
defer pgwErr.Close()
metric1 := prometheus.NewCounter(prometheus.CounterOpts{
Name: "testname1",
Help: "testhelp1",
})
metric2 := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "testname2",
Help: "testhelp2",
ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"},
})
reg := prometheus.NewRegistry()
reg.MustRegister(metric1)
reg.MustRegister(metric2)
mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
t.Fatal(err)
}
}
wantBody := buf.Bytes()
// PushCollectors, all good.
if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil {
t.Fatal(err)
}
if lastMethod != "PUT" {
t.Error("want method PUT for PushCollectors, got", lastMethod)
}
if !bytes.Equal(lastBody, wantBody) {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob/instance/"+host {
t.Error("unexpected path:", lastPath)
}
// PushAddCollectors, with nil grouping, all good.
if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil {
t.Fatal(err)
}
if lastMethod != "POST" {
t.Error("want method POST for PushAddCollectors, got", lastMethod)
}
if !bytes.Equal(lastBody, wantBody) {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob" {
t.Error("unexpected path:", lastPath)
}
// PushCollectors with a broken PGW.
if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push to broken Pushgateway succeeded")
} else {
if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
t.Errorf("got error %q, want %q", got, want)
}
}
// PushCollectors with invalid grouping or job.
if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with grouping contained in metrics succeeded")
}
if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with invalid job value succeeded")
}
if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with invalid grouping succeeded")
}
if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with invalid grouping succeeded")
}
// Push registry, all good.
if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil {
t.Fatal(err)
}
if lastMethod != "PUT" {
t.Error("want method PUT for Push, got", lastMethod)
}
if !bytes.Equal(lastBody, wantBody) {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
// PushAdd registry, all good.
if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil {
t.Fatal(err)
}
if lastMethod != "POST" {
t.Error("want method POSTT for PushAdd, got", lastMethod)
}
if !bytes.Equal(lastBody, wantBody) {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
t.Error("unexpected path:", lastPath)
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,12 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -1,12 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -1,14 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['metrics_pb2']


@@ -1,575 +0,0 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: metrics.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='metrics.proto',
package='io.prometheus.client',
serialized_pb=_b('\n\rmetrics.proto\x12\x14io.prometheus.client\"(\n\tLabelPair\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x16\n\x05Gauge\x12\r\n\x05value\x18\x01 \x01(\x01\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x01\"+\n\x08Quantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01\"e\n\x07Summary\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12\x30\n\x08quantile\x18\x03 \x03(\x0b\x32\x1e.io.prometheus.client.Quantile\"\x18\n\x07Untyped\x12\r\n\x05value\x18\x01 \x01(\x01\"c\n\tHistogram\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12,\n\x06\x62ucket\x18\x03 \x03(\x0b\x32\x1c.io.prometheus.client.Bucket\"7\n\x06\x42ucket\x12\x18\n\x10\x63umulative_count\x18\x01 \x01(\x04\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x01\"\xbe\x02\n\x06Metric\x12.\n\x05label\x18\x01 \x03(\x0b\x32\x1f.io.prometheus.client.LabelPair\x12*\n\x05gauge\x18\x02 \x01(\x0b\x32\x1b.io.prometheus.client.Gauge\x12.\n\x07\x63ounter\x18\x03 \x01(\x0b\x32\x1d.io.prometheus.client.Counter\x12.\n\x07summary\x18\x04 \x01(\x0b\x32\x1d.io.prometheus.client.Summary\x12.\n\x07untyped\x18\x05 \x01(\x0b\x32\x1d.io.prometheus.client.Untyped\x12\x32\n\thistogram\x18\x07 \x01(\x0b\x32\x1f.io.prometheus.client.Histogram\x12\x14\n\x0ctimestamp_ms\x18\x06 \x01(\x03\"\x88\x01\n\x0cMetricFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04help\x18\x02 \x01(\t\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .io.prometheus.client.MetricType\x12,\n\x06metric\x18\x04 \x03(\x0b\x32\x1c.io.prometheus.client.Metric*M\n\nMetricType\x12\x0b\n\x07\x43OUNTER\x10\x00\x12\t\n\x05GAUGE\x10\x01\x12\x0b\n\x07SUMMARY\x10\x02\x12\x0b\n\x07UNTYPED\x10\x03\x12\r\n\tHISTOGRAM\x10\x04\x42\x16\n\x14io.prometheus.client')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_METRICTYPE = _descriptor.EnumDescriptor(
name='MetricType',
full_name='io.prometheus.client.MetricType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COUNTER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GAUGE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUMMARY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNTYPED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HISTOGRAM', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=923,
serialized_end=1000,
)
_sym_db.RegisterEnumDescriptor(_METRICTYPE)
MetricType = enum_type_wrapper.EnumTypeWrapper(_METRICTYPE)
COUNTER = 0
GAUGE = 1
SUMMARY = 2
UNTYPED = 3
HISTOGRAM = 4
_LABELPAIR = _descriptor.Descriptor(
name='LabelPair',
full_name='io.prometheus.client.LabelPair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='io.prometheus.client.LabelPair.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.LabelPair.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=79,
)
_GAUGE = _descriptor.Descriptor(
name='Gauge',
full_name='io.prometheus.client.Gauge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Gauge.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=103,
)
_COUNTER = _descriptor.Descriptor(
name='Counter',
full_name='io.prometheus.client.Counter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Counter.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=129,
)
_QUANTILE = _descriptor.Descriptor(
name='Quantile',
full_name='io.prometheus.client.Quantile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='quantile', full_name='io.prometheus.client.Quantile.quantile', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Quantile.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=174,
)
_SUMMARY = _descriptor.Descriptor(
name='Summary',
full_name='io.prometheus.client.Summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sample_count', full_name='io.prometheus.client.Summary.sample_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sample_sum', full_name='io.prometheus.client.Summary.sample_sum', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantile', full_name='io.prometheus.client.Summary.quantile', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=277,
)
_UNTYPED = _descriptor.Descriptor(
name='Untyped',
full_name='io.prometheus.client.Untyped',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Untyped.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=279,
serialized_end=303,
)
_HISTOGRAM = _descriptor.Descriptor(
name='Histogram',
full_name='io.prometheus.client.Histogram',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sample_count', full_name='io.prometheus.client.Histogram.sample_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sample_sum', full_name='io.prometheus.client.Histogram.sample_sum', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bucket', full_name='io.prometheus.client.Histogram.bucket', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=305,
serialized_end=404,
)
_BUCKET = _descriptor.Descriptor(
name='Bucket',
full_name='io.prometheus.client.Bucket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cumulative_count', full_name='io.prometheus.client.Bucket.cumulative_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='upper_bound', full_name='io.prometheus.client.Bucket.upper_bound', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=406,
serialized_end=461,
)
_METRIC = _descriptor.Descriptor(
name='Metric',
full_name='io.prometheus.client.Metric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='io.prometheus.client.Metric.label', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gauge', full_name='io.prometheus.client.Metric.gauge', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='counter', full_name='io.prometheus.client.Metric.counter', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='summary', full_name='io.prometheus.client.Metric.summary', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='untyped', full_name='io.prometheus.client.Metric.untyped', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='histogram', full_name='io.prometheus.client.Metric.histogram', index=5,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp_ms', full_name='io.prometheus.client.Metric.timestamp_ms', index=6,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=464,
serialized_end=782,
)
_METRICFAMILY = _descriptor.Descriptor(
name='MetricFamily',
full_name='io.prometheus.client.MetricFamily',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='io.prometheus.client.MetricFamily.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='help', full_name='io.prometheus.client.MetricFamily.help', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='io.prometheus.client.MetricFamily.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metric', full_name='io.prometheus.client.MetricFamily.metric', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=785,
serialized_end=921,
)
_SUMMARY.fields_by_name['quantile'].message_type = _QUANTILE
_HISTOGRAM.fields_by_name['bucket'].message_type = _BUCKET
_METRIC.fields_by_name['label'].message_type = _LABELPAIR
_METRIC.fields_by_name['gauge'].message_type = _GAUGE
_METRIC.fields_by_name['counter'].message_type = _COUNTER
_METRIC.fields_by_name['summary'].message_type = _SUMMARY
_METRIC.fields_by_name['untyped'].message_type = _UNTYPED
_METRIC.fields_by_name['histogram'].message_type = _HISTOGRAM
_METRICFAMILY.fields_by_name['type'].enum_type = _METRICTYPE
_METRICFAMILY.fields_by_name['metric'].message_type = _METRIC
DESCRIPTOR.message_types_by_name['LabelPair'] = _LABELPAIR
DESCRIPTOR.message_types_by_name['Gauge'] = _GAUGE
DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
DESCRIPTOR.message_types_by_name['Quantile'] = _QUANTILE
DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
DESCRIPTOR.message_types_by_name['Untyped'] = _UNTYPED
DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM
DESCRIPTOR.message_types_by_name['Bucket'] = _BUCKET
DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
DESCRIPTOR.message_types_by_name['MetricFamily'] = _METRICFAMILY
DESCRIPTOR.enum_types_by_name['MetricType'] = _METRICTYPE
LabelPair = _reflection.GeneratedProtocolMessageType('LabelPair', (_message.Message,), dict(
DESCRIPTOR = _LABELPAIR,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
))
_sym_db.RegisterMessage(LabelPair)
Gauge = _reflection.GeneratedProtocolMessageType('Gauge', (_message.Message,), dict(
DESCRIPTOR = _GAUGE,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
))
_sym_db.RegisterMessage(Gauge)
Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
DESCRIPTOR = _COUNTER,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
))
_sym_db.RegisterMessage(Counter)
Quantile = _reflection.GeneratedProtocolMessageType('Quantile', (_message.Message,), dict(
DESCRIPTOR = _QUANTILE,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
))
_sym_db.RegisterMessage(Quantile)
Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), dict(
DESCRIPTOR = _SUMMARY,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
))
_sym_db.RegisterMessage(Summary)
Untyped = _reflection.GeneratedProtocolMessageType('Untyped', (_message.Message,), dict(
DESCRIPTOR = _UNTYPED,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
))
_sym_db.RegisterMessage(Untyped)
Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict(
DESCRIPTOR = _HISTOGRAM,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
))
_sym_db.RegisterMessage(Histogram)
Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict(
DESCRIPTOR = _BUCKET,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
))
_sym_db.RegisterMessage(Bucket)
Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict(
DESCRIPTOR = _METRIC,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
))
_sym_db.RegisterMessage(Metric)
MetricFamily = _reflection.GeneratedProtocolMessageType('MetricFamily', (_message.Message,), dict(
DESCRIPTOR = _METRICFAMILY,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
))
_sym_db.RegisterMessage(MetricFamily)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024io.prometheus.client'))
# @@protoc_insertion_point(module_scope)
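The descriptor wiring above is the generated form of the metrics.proto data model: each Metric carries a label set plus exactly one of the typed sub-messages (gauge, counter, summary, untyped, histogram), and a MetricFamily groups metrics under one name, help string, and type. As a rough sketch of the model in use, here is a minimal example with the Go bindings generated from the same metrics.proto (assuming the usual import paths, github.com/prometheus/client_model/go and github.com/golang/protobuf/proto):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// One counter family with a single labeled series.
	mf := &dto.MetricFamily{
		Name: proto.String("rpc_calls_total"),
		Help: proto.String("RPC calls."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label:   []*dto.LabelPair{{Name: proto.String("service"), Value: proto.String("zed")}},
			Counter: &dto.Counter{Value: proto.Float64(25)},
		}},
	}
	buf, err := proto.Marshal(mf) // plain (non-delimited) wire encoding
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(buf))
}
```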


@ -1,5 +0,0 @@
*.gem
.bundle
Gemfile.lock
pkg
vendor/bundle


@ -1,4 +0,0 @@
source 'https://rubygems.org'
# Specify your gem's dependencies in prometheus-client-model.gemspec
gemspec


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,17 +0,0 @@
VENDOR_BUNDLE = vendor/bundle
build: $(VENDOR_BUNDLE)/.bundled
BEEFCAKE_NAMESPACE=Prometheus::Client protoc --beefcake_out lib/prometheus/client/model -I .. ../metrics.proto
$(VENDOR_BUNDLE):
mkdir -p $@
$(VENDOR_BUNDLE)/.bundled: $(VENDOR_BUNDLE) Gemfile
bundle install --quiet --path $<
@touch $@
clean:
-rm -f lib/prometheus/client/model/metrics.pb.rb
-rm -rf $(VENDOR_BUNDLE)
.PHONY: build clean


@ -1,31 +0,0 @@
# Prometheus Ruby client model
Data model artifacts for the [Prometheus Ruby client][1].
## Installation
    gem install prometheus-client-model
## Usage
Build the artifacts from the protobuf specification:
    make build
While this Gem's main purpose is to define the Prometheus data types for the
[client][1], it's possible to use it without the client to decode a stream of
delimited protobuf messages:
```ruby
require 'open-uri'
require 'prometheus/client/model'
CONTENT_TYPE = 'application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited'
stream = open('http://localhost:9090/metrics', 'Accept' => CONTENT_TYPE).read
while family = Prometheus::Client::MetricFamily.read_delimited(stream)
puts family
end
```
[1]: https://github.com/prometheus/client_ruby
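The Go bindings consume the same delimited wire format; here is a hedged sketch of the equivalent decoding loop in Go, assuming the delimited-message helper from github.com/matttproud/golang_protobuf_extensions/pbutil and the generated types from github.com/prometheus/client_model/go:

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	req, _ := http.NewRequest("GET", "http://localhost:9090/metrics", nil) // fixed, known-valid URL
	req.Header.Set("Accept", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Read length-delimited MetricFamily messages until the stream ends.
	for {
		var mf dto.MetricFamily
		if _, err := pbutil.ReadDelimited(resp.Body, &mf); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(mf.GetName())
	}
}
```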


@ -1 +0,0 @@
require "bundler/gem_tasks"


@ -1,2 +0,0 @@
require 'prometheus/client/model/metrics.pb'
require 'prometheus/client/model/version'


@ -1,111 +0,0 @@
## Generated from metrics.proto for io.prometheus.client
require "beefcake"
module Prometheus
module Client
module MetricType
COUNTER = 0
GAUGE = 1
SUMMARY = 2
UNTYPED = 3
HISTOGRAM = 4
end
class LabelPair
include Beefcake::Message
end
class Gauge
include Beefcake::Message
end
class Counter
include Beefcake::Message
end
class Quantile
include Beefcake::Message
end
class Summary
include Beefcake::Message
end
class Untyped
include Beefcake::Message
end
class Histogram
include Beefcake::Message
end
class Bucket
include Beefcake::Message
end
class Metric
include Beefcake::Message
end
class MetricFamily
include Beefcake::Message
end
class LabelPair
optional :name, :string, 1
optional :value, :string, 2
end
class Gauge
optional :value, :double, 1
end
class Counter
optional :value, :double, 1
end
class Quantile
optional :quantile, :double, 1
optional :value, :double, 2
end
class Summary
optional :sample_count, :uint64, 1
optional :sample_sum, :double, 2
repeated :quantile, Quantile, 3
end
class Untyped
optional :value, :double, 1
end
class Histogram
optional :sample_count, :uint64, 1
optional :sample_sum, :double, 2
repeated :bucket, Bucket, 3
end
class Bucket
optional :cumulative_count, :uint64, 1
optional :upper_bound, :double, 2
end
class Metric
repeated :label, LabelPair, 1
optional :gauge, Gauge, 2
optional :counter, Counter, 3
optional :summary, Summary, 4
optional :untyped, Untyped, 5
optional :histogram, Histogram, 7
optional :timestamp_ms, :int64, 6
end
class MetricFamily
optional :name, :string, 1
optional :help, :string, 2
optional :type, MetricType, 3
repeated :metric, Metric, 4
end
end
end


@ -1,7 +0,0 @@
module Prometheus
module Client
module Model
VERSION = '0.1.0'
end
end
end


@ -1,22 +0,0 @@
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'prometheus/client/model/version'
Gem::Specification.new do |spec|
spec.name = 'prometheus-client-model'
spec.version = Prometheus::Client::Model::VERSION
spec.authors = ['Tobias Schmidt']
spec.email = ['tobidt@gmail.com']
spec.summary = 'Data model artifacts for the Prometheus Ruby client'
spec.homepage = 'https://github.com/prometheus/client_model/tree/master/ruby'
spec.license = 'Apache 2.0'
spec.files = %w[README.md LICENSE] + Dir.glob('{lib/**/*}')
spec.require_paths = ['lib']
spec.add_dependency 'beefcake', '>= 0.4.0'
spec.add_development_dependency 'bundler', '~> 1.3'
spec.add_development_dependency 'rake'
end

File diff suppressed because it is too large


@ -1,30 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"strings"
)
// checkOverflow reports, as an error, any fields left in m after parsing;
// m is the inline catch-all map and ctx names the config section.
func checkOverflow(m map[string]interface{}, ctx string) error {
if len(m) > 0 {
var keys []string
for k := range m {
keys = append(keys, k)
}
return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", "))
}
return nil
}
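checkOverflow is one half of a pattern: config structs declare a trailing `XXX map[string]interface{}` field tagged `yaml:",inline"`, so any key the struct does not declare is collected rather than silently dropped, and checkOverflow turns a non-empty map into an error. A minimal sketch of the idiom (demoConfig is a hypothetical struct, not from this package):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// demoConfig mirrors the catch-all idiom used by the config structs here.
type demoConfig struct {
	Port int                    `yaml:"port"`
	XXX  map[string]interface{} `yaml:",inline"` // receives unknown keys
}

func main() {
	var c demoConfig
	if err := yaml.Unmarshal([]byte("port: 9090\ntypo_field: true\n"), &c); err != nil {
		panic(err)
	}
	// c.XXX now holds {"typo_field": true}; a checkOverflow-style helper
	// would reject the config with: unknown fields in demo: typo_field
	fmt.Println(c.XXX)
}
```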


@ -1 +0,0 @@
cert_file: somefile


@ -1 +0,0 @@
insecure_skip_verify: true


@ -1 +0,0 @@
something_invalid: true


@ -1 +0,0 @@
key_file: somefile


@ -1,79 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
)
// TLSConfig configures the options for TLS connections.
type TLSConfig struct {
// The CA cert to use for the targets.
CAFile string `yaml:"ca_file,omitempty"`
// The client cert file for the targets.
CertFile string `yaml:"cert_file,omitempty"`
// The client key file for the targets.
KeyFile string `yaml:"key_file,omitempty"`
// Disable target certificate validation.
InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain TLSConfig
if err := unmarshal((*plain)(c)); err != nil {
return err
}
return checkOverflow(c.XXX, "TLS config")
}
// GenerateConfig produces a tls.Config based on TLS connection options.
// It loads certificate files from disk if they are defined.
func (c *TLSConfig) GenerateConfig() (*tls.Config, error) {
tlsConfig := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify}
// If a CA cert is provided then let's read it in so we can validate the
// scrape target's certificate properly.
if len(c.CAFile) > 0 {
caCertPool := x509.NewCertPool()
// Load CA cert.
caCert, err := ioutil.ReadFile(c.CAFile)
if err != nil {
return nil, fmt.Errorf("unable to use specified CA cert %s: %s", c.CAFile, err)
}
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.RootCAs = caCertPool
}
if len(c.CertFile) > 0 && len(c.KeyFile) == 0 {
return nil, fmt.Errorf("client cert file %q specified without client key file", c.CertFile)
} else if len(c.KeyFile) > 0 && len(c.CertFile) == 0 {
return nil, fmt.Errorf("client key file %q specified without client cert file", c.KeyFile)
} else if len(c.CertFile) > 0 && len(c.KeyFile) > 0 {
cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
if err != nil {
return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
tlsConfig.BuildNameToCertificate()
return tlsConfig, nil
}
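A brief usage sketch (an editor's illustration, not code from this file): once GenerateConfig has validated the cert/key pairing and loaded any CA file, the resulting *tls.Config is typically handed to an HTTP transport.

```go
import "net/http"

// newTLSClient is a hypothetical helper showing the intended call pattern:
// expand the YAML-level options once, then reuse the resulting *tls.Config.
func newTLSClient(c *TLSConfig) (*http.Client, error) {
	tlsCfg, err := c.GenerateConfig()
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
	}, nil
}
```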


@ -1,92 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"crypto/tls"
"io/ioutil"
"reflect"
"strings"
"testing"
"gopkg.in/yaml.v2"
)
// LoadTLSConfig parses the given YAML file into a tls.Config.
func LoadTLSConfig(filename string) (*tls.Config, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
cfg := &TLSConfig{}
if err = yaml.Unmarshal(content, cfg); err != nil {
return nil, err
}
return cfg.GenerateConfig()
}
var expectedTLSConfigs = []struct {
filename string
config *tls.Config
}{
{
filename: "tls_config.empty.good.yml",
config: &tls.Config{},
}, {
filename: "tls_config.insecure.good.yml",
config: &tls.Config{InsecureSkipVerify: true},
},
}
func TestValidTLSConfig(t *testing.T) {
for _, cfg := range expectedTLSConfigs {
cfg.config.BuildNameToCertificate()
got, err := LoadTLSConfig("testdata/" + cfg.filename)
if err != nil {
t.Errorf("Error parsing %s: %s", cfg.filename, err)
continue // got is nil on error; the DeepEqual below would panic
}
if !reflect.DeepEqual(*got, *cfg.config) {
t.Fatalf("%s: unexpected config result: \n\n%s\n expected\n\n%s", cfg.filename, got, cfg.config)
}
}
}
var expectedTLSConfigErrors = []struct {
filename string
errMsg string
}{
{
filename: "tls_config.invalid_field.bad.yml",
errMsg: "unknown fields in",
}, {
filename: "tls_config.cert_no_key.bad.yml",
errMsg: "specified without client key file",
}, {
filename: "tls_config.key_no_cert.bad.yml",
errMsg: "specified without client cert file",
},
}
func TestBadTLSConfigs(t *testing.T) {
for _, ee := range expectedTLSConfigErrors {
_, err := LoadTLSConfig("testdata/" + ee.filename)
if err == nil {
t.Errorf("Expected error parsing %s but got none", ee.filename)
continue
}
if !strings.Contains(err.Error(), ee.errMsg) {
t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
}
}
}


@ -1,2 +0,0 @@


@ -1,6 +0,0 @@
minimal_metric 1.234
another_metric -3e3 103948
# Even that:
no_labels{} 3
# HELP line for non-existing metric will be ignored.


@ -1,12 +0,0 @@
# A normal comment.
#
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
# HELP name two-line\n doc str\\ing
# HELP name2 doc str"ing 2
# TYPE name2 gauge
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
name2{ labelname = "val1" , }-Inf


@ -1,22 +0,0 @@
# TYPE my_summary summary
my_summary{n1="val1",quantile="0.5"} 110
decoy -1 -2
my_summary{n1="val1",quantile="0.9"} 140 1
my_summary_count{n1="val1"} 42
# Latest timestamp wins in case of a summary.
my_summary_sum{n1="val1"} 4711 2
fake_sum{n1="val1"} 2001
# TYPE another_summary summary
another_summary_count{n2="val2",n1="val1"} 20
my_summary_count{n2="val2",n1="val1"} 5 5
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
my_summary_sum{n1="val2"} 08 15
my_summary{n1="val3", quantile="0.2"} 4711
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
# some
# funny comments
# HELP
# HELP
# HELP my_summary
# HELP my_summary


@ -1,10 +0,0 @@
# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693


@ -1 +0,0 @@
bla 3.14


@ -1 +0,0 @@
metric{label="\t"} 3.14


@ -1 +0,0 @@
metric{label="bla"} 3.14 2 3


@ -1 +0,0 @@
metric{label="bla"} blubb


@ -1,3 +0,0 @@
# HELP metric one
# HELP metric two


@ -1,3 +0,0 @@
# TYPE metric counter
# TYPE metric untyped


@ -1,3 +0,0 @@
metric 4.12
# TYPE metric counter


@ -1,2 +0,0 @@
# TYPE metric bla


@ -1,2 +0,0 @@
# TYPE met-ric


@ -1 +0,0 @@
@invalidmetric{label="bla"} 3.14 2


@ -1 +0,0 @@
{label="bla"} 3.14 2


@ -1,3 +0,0 @@
# TYPE metric histogram
metric_bucket{le="bla"} 3.14


@ -1,3 +0,0 @@
metric{label="new
line"} 3.14


@ -1 +0,0 @@
metric{@="bla"} 3.14


@ -1 +0,0 @@
metric{__name__="bla"} 3.14


@ -1 +0,0 @@
metric{label+="bla"} 3.14


@ -1 +0,0 @@
metric{label=bla} 3.14


@ -1,3 +0,0 @@
# TYPE metric summary
metric{quantile="bla"} 3.14


@ -1 +0,0 @@
metric{label="bla"+} 3.14


@ -1 +0,0 @@
metric{label="bla"} 3.14 2.72


@ -1 +0,0 @@
m{} 0


@ -1,46 +0,0 @@
[
{
"baseLabels": {
"__name__": "rpc_calls_total",
"job": "batch_job"
},
"docstring": "RPC calls.",
"metric": {
"type": "counter",
"value": [
{
"labels": {
"service": "zed"
},
"value": 25
},
{
"labels": {
"service": "bar"
},
"value": 24
}
]
}
},
{
"baseLabels": {
"__name__": "rpc_latency_microseconds"
},
"docstring": "RPC latency.",
"metric": {
"type": "histogram",
"value": [
{
"labels": {
"service": "foo"
},
"value": {
"0.010000": 15,
"0.990000": 17
}
}
]
}
}
]


@ -1,46 +0,0 @@
[
{
"baseLabels": {
"__name__": "rpc_calls_total",
"job": "batch_job"
},
"docstring": "RPC calls.",
"metric": {
"type": "counter",
"value": [
{
"labels": {
"servic|e": "zed"
},
"value": 25
},
{
"labels": {
"service": "bar"
},
"value": 24
}
]
}
},
{
"baseLabels": {
"__name__": "rpc_latency_microseconds"
},
"docstring": "RPC latency.",
"metric": {
"type": "histogram",
"value": [
{
"labels": {
"service": "foo"
},
"value": {
"0.010000": 15,
"0.990000": 17
}
}
]
}
}
]

Binary file not shown.

Binary file not shown.


@ -1,322 +0,0 @@
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="/",quantile="0.5"} 0
http_request_duration_microseconds{handler="/",quantile="0.9"} 0
http_request_duration_microseconds{handler="/",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/"} 0
http_request_duration_microseconds_count{handler="/"} 0
http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/alerts"} 0
http_request_duration_microseconds_count{handler="/alerts"} 0
http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/metrics"} 0
http_request_duration_microseconds_count{handler="/api/metrics"} 0
http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/query"} 0
http_request_duration_microseconds_count{handler="/api/query"} 0
http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/query_range"} 0
http_request_duration_microseconds_count{handler="/api/query_range"} 0
http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/targets"} 0
http_request_duration_microseconds_count{handler="/api/targets"} 0
http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/consoles/"} 0
http_request_duration_microseconds_count{handler="/consoles/"} 0
http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/graph"} 0
http_request_duration_microseconds_count{handler="/graph"} 0
http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/heap"} 0
http_request_duration_microseconds_count{handler="/heap"} 0
http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/static/"} 0
http_request_duration_microseconds_count{handler="/static/"} 0
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
http_request_duration_microseconds_count{handler="prometheus"} 119
# HELP http_request_size_bytes The HTTP request sizes in bytes.
# TYPE http_request_size_bytes summary
http_request_size_bytes{handler="/",quantile="0.5"} 0
http_request_size_bytes{handler="/",quantile="0.9"} 0
http_request_size_bytes{handler="/",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/"} 0
http_request_size_bytes_count{handler="/"} 0
http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/alerts"} 0
http_request_size_bytes_count{handler="/alerts"} 0
http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/metrics"} 0
http_request_size_bytes_count{handler="/api/metrics"} 0
http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/query"} 0
http_request_size_bytes_count{handler="/api/query"} 0
http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/query_range"} 0
http_request_size_bytes_count{handler="/api/query_range"} 0
http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/targets"} 0
http_request_size_bytes_count{handler="/api/targets"} 0
http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/consoles/"} 0
http_request_size_bytes_count{handler="/consoles/"} 0
http_request_size_bytes{handler="/graph",quantile="0.5"} 0
http_request_size_bytes{handler="/graph",quantile="0.9"} 0
http_request_size_bytes{handler="/graph",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/graph"} 0
http_request_size_bytes_count{handler="/graph"} 0
http_request_size_bytes{handler="/heap",quantile="0.5"} 0
http_request_size_bytes{handler="/heap",quantile="0.9"} 0
http_request_size_bytes{handler="/heap",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/heap"} 0
http_request_size_bytes_count{handler="/heap"} 0
http_request_size_bytes{handler="/static/",quantile="0.5"} 0
http_request_size_bytes{handler="/static/",quantile="0.9"} 0
http_request_size_bytes{handler="/static/",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/static/"} 0
http_request_size_bytes_count{handler="/static/"} 0
http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
http_request_size_bytes_sum{handler="prometheus"} 34488
http_request_size_bytes_count{handler="prometheus"} 119
# HELP http_requests_total Total number of HTTP requests made.
# TYPE http_requests_total counter
http_requests_total{code="200",handler="prometheus",method="get"} 119
# HELP http_response_size_bytes The HTTP response sizes in bytes.
# TYPE http_response_size_bytes summary
http_response_size_bytes{handler="/",quantile="0.5"} 0
http_response_size_bytes{handler="/",quantile="0.9"} 0
http_response_size_bytes{handler="/",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/"} 0
http_response_size_bytes_count{handler="/"} 0
http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/alerts"} 0
http_response_size_bytes_count{handler="/alerts"} 0
http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/metrics"} 0
http_response_size_bytes_count{handler="/api/metrics"} 0
http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/query"} 0
http_response_size_bytes_count{handler="/api/query"} 0
http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/query_range"} 0
http_response_size_bytes_count{handler="/api/query_range"} 0
http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/targets"} 0
http_response_size_bytes_count{handler="/api/targets"} 0
http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/consoles/"} 0
http_response_size_bytes_count{handler="/consoles/"} 0
http_response_size_bytes{handler="/graph",quantile="0.5"} 0
http_response_size_bytes{handler="/graph",quantile="0.9"} 0
http_response_size_bytes{handler="/graph",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/graph"} 0
http_response_size_bytes_count{handler="/graph"} 0
http_response_size_bytes{handler="/heap",quantile="0.5"} 0
http_response_size_bytes{handler="/heap",quantile="0.9"} 0
http_response_size_bytes{handler="/heap",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/heap"} 0
http_response_size_bytes_count{handler="/heap"} 0
http_response_size_bytes{handler="/static/",quantile="0.5"} 0
http_response_size_bytes{handler="/static/",quantile="0.9"} 0
http_response_size_bytes{handler="/static/",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/static/"} 0
http_response_size_bytes_count{handler="/static/"} 0
http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
http_response_size_bytes_sum{handler="prometheus"} 247001
http_response_size_bytes_count{handler="prometheus"} 119
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.55
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 70
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 8192
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 29
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 5.3870592e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.42236894836e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 5.41478912e+08
# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
# TYPE prometheus_dns_sd_lookup_failures_total counter
prometheus_dns_sd_lookup_failures_total 0
# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
# TYPE prometheus_dns_sd_lookups_total counter
prometheus_dns_sd_lookups_total 7
# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
# TYPE prometheus_evaluator_duration_milliseconds summary
prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
prometheus_evaluator_duration_milliseconds_sum 12
prometheus_evaluator_duration_milliseconds_count 23
# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
prometheus_local_storage_checkpoint_duration_milliseconds 0
# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
# TYPE prometheus_local_storage_chunk_ops_total counter
prometheus_local_storage_chunk_ops_total{type="create"} 598
prometheus_local_storage_chunk_ops_total{type="persist"} 174
prometheus_local_storage_chunk_ops_total{type="pin"} 920
prometheus_local_storage_chunk_ops_total{type="transcode"} 415
prometheus_local_storage_chunk_ops_total{type="unpin"} 920
# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
# TYPE prometheus_local_storage_indexing_batch_sizes summary
prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
prometheus_local_storage_indexing_batch_sizes_sum 2
prometheus_local_storage_indexing_batch_sizes_count 1
# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
# TYPE prometheus_local_storage_indexing_queue_capacity gauge
prometheus_local_storage_indexing_queue_capacity 16384
# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
# TYPE prometheus_local_storage_indexing_queue_length gauge
prometheus_local_storage_indexing_queue_length 0
# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
# TYPE prometheus_local_storage_ingested_samples_total counter
prometheus_local_storage_ingested_samples_total 30473
# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
# TYPE prometheus_local_storage_invalid_preload_requests_total counter
prometheus_local_storage_invalid_preload_requests_total 0
# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
# TYPE prometheus_local_storage_memory_chunkdescs gauge
prometheus_local_storage_memory_chunkdescs 1059
# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
# TYPE prometheus_local_storage_memory_chunks gauge
prometheus_local_storage_memory_chunks 1020
# HELP prometheus_local_storage_memory_series The current number of series in memory.
# TYPE prometheus_local_storage_memory_series gauge
prometheus_local_storage_memory_series 424
# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
# TYPE prometheus_local_storage_persist_latency_microseconds summary
prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
prometheus_local_storage_persist_latency_microseconds_sum 20424.415
prometheus_local_storage_persist_latency_microseconds_count 174
# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
# TYPE prometheus_local_storage_persist_queue_capacity gauge
prometheus_local_storage_persist_queue_capacity 1024
# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
# TYPE prometheus_local_storage_persist_queue_length gauge
prometheus_local_storage_persist_queue_length 0
# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
# TYPE prometheus_local_storage_series_ops_total counter
prometheus_local_storage_series_ops_total{type="create"} 2
prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
# TYPE prometheus_notifications_latency_milliseconds summary
prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
prometheus_notifications_latency_milliseconds_sum 0
prometheus_notifications_latency_milliseconds_count 0
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
# TYPE prometheus_notifications_queue_capacity gauge
prometheus_notifications_queue_capacity 100
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
# TYPE prometheus_notifications_queue_length gauge
prometheus_notifications_queue_length 0
# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_milliseconds summary
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# TYPE prometheus_rule_evaluation_failures_total counter
prometheus_rule_evaluation_failures_total 0
# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
# TYPE prometheus_samples_queue_capacity gauge
prometheus_samples_queue_capacity 4096
# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
# TYPE prometheus_samples_queue_length gauge
prometheus_samples_queue_length 0
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
prometheus_target_interval_length_seconds_sum{interval="15s"} 175
prometheus_target_interval_length_seconds_count{interval="15s"} 12
prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
prometheus_target_interval_length_seconds_sum{interval="1s"} 55
prometheus_target_interval_length_seconds_count{interval="1s"} 117

Binary file not shown.


@ -1,89 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package log
import (
"fmt"
"os"
"golang.org/x/sys/windows/svc/eventlog"
"github.com/Sirupsen/logrus"
)
func init() {
setEventlogFormatter = func(name string, debugAsInfo bool) error {
if name == "" {
return fmt.Errorf("missing name parameter")
}
fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
origLogger.Errorf("can't connect logger to eventlog: %v", err)
return err
}
origLogger.Formatter = fmter
return nil
}
}
type eventlogger struct {
log *eventlog.Log
debugAsInfo bool
wrap logrus.Formatter
}
func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) {
logHandle, err := eventlog.Open(name)
if err != nil {
return nil, err
}
return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil
}
func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) {
data, err := s.wrap.Format(e)
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err)
return data, err
}
switch e.Level {
case logrus.PanicLevel:
fallthrough
case logrus.FatalLevel:
fallthrough
case logrus.ErrorLevel:
err = s.log.Error(102, e.Message)
case logrus.WarnLevel:
err = s.log.Warning(101, e.Message)
case logrus.InfoLevel:
err = s.log.Info(100, e.Message)
case logrus.DebugLevel:
if s.debugAsInfo {
err = s.log.Info(100, e.Message)
}
default:
err = s.log.Info(100, e.Message)
}
if err != nil {
fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err)
}
return data, err
}


@ -1,365 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
)
type levelFlag string
// String implements flag.Value.
func (f levelFlag) String() string {
return fmt.Sprintf("%q", origLogger.Level.String())
}
// Set implements flag.Value.
func (f levelFlag) Set(level string) error {
l, err := logrus.ParseLevel(level)
if err != nil {
return err
}
origLogger.Level = l
return nil
}
// setSyslogFormatter is nil if the target architecture does not support syslog.
var setSyslogFormatter func(string, string) error
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
var setEventlogFormatter func(string, bool) error
func setJSONFormatter() {
origLogger.Formatter = &logrus.JSONFormatter{}
}
type logFormatFlag url.URL
// String implements flag.Value.
func (f logFormatFlag) String() string {
u := url.URL(f)
return fmt.Sprintf("%q", u.String())
}
// Set implements flag.Value.
func (f logFormatFlag) Set(format string) error {
u, err := url.Parse(format)
if err != nil {
return err
}
if u.Scheme != "logger" {
return fmt.Errorf("invalid scheme %s", u.Scheme)
}
jsonq := u.Query().Get("json")
if jsonq == "true" {
setJSONFormatter()
}
switch u.Opaque {
case "syslog":
if setSyslogFormatter == nil {
return fmt.Errorf("system does not support syslog")
}
appname := u.Query().Get("appname")
facility := u.Query().Get("local")
return setSyslogFormatter(appname, facility)
case "eventlog":
if setEventlogFormatter == nil {
return fmt.Errorf("system does not support eventlog")
}
name := u.Query().Get("name")
debugAsInfo := false
debugAsInfoRaw := u.Query().Get("debugAsInfo")
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
debugAsInfo = parsedDebugAsInfo
}
return setEventlogFormatter(name, debugAsInfo)
case "stdout":
origLogger.Out = os.Stdout
case "stderr":
origLogger.Out = os.Stderr
default:
return fmt.Errorf("unsupported logger %q", u.Opaque)
}
return nil
}
func init() {
AddFlags(flag.CommandLine)
}
// AddFlags adds the flags used by this package to the given FlagSet. That's
// useful if working with a custom FlagSet. The init function of this package
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
// flag.Parse() to make the logging flags take effect.
func AddFlags(fs *flag.FlagSet) {
fs.Var(
levelFlag(origLogger.Level.String()),
"log.level",
"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
)
fs.Var(
logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
"log.format",
`Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
)
}
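
A minimal sketch of using AddFlags with a custom FlagSet instead of relying on the package's init hook on flag.CommandLine; the program name "myapp" is illustrative:

package main

import (
	"flag"
	"os"

	"github.com/prometheus/common/log"
)

func main() {
	fs := flag.NewFlagSet("myapp", flag.ExitOnError)
	log.AddFlags(fs) // registers -log.level and -log.format on fs
	fs.Parse(os.Args[1:])

	log.Debugln("only shown when started with -log.level=debug")
}
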
// Logger is the interface for loggers used in the Prometheus components.
type Logger interface {
Debug(...interface{})
Debugln(...interface{})
Debugf(string, ...interface{})
Info(...interface{})
Infoln(...interface{})
Infof(string, ...interface{})
Warn(...interface{})
Warnln(...interface{})
Warnf(string, ...interface{})
Error(...interface{})
Errorln(...interface{})
Errorf(string, ...interface{})
Fatal(...interface{})
Fatalln(...interface{})
Fatalf(string, ...interface{})
With(key string, value interface{}) Logger
}
type logger struct {
entry *logrus.Entry
}
func (l logger) With(key string, value interface{}) Logger {
return logger{l.entry.WithField(key, value)}
}
// Debug logs a message at level Debug on the standard logger.
func (l logger) Debug(args ...interface{}) {
l.sourced().Debug(args...)
}
// Debugln logs a message at level Debug on the standard logger.
func (l logger) Debugln(args ...interface{}) {
l.sourced().Debugln(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func (l logger) Debugf(format string, args ...interface{}) {
l.sourced().Debugf(format, args...)
}
// Info logs a message at level Info on the standard logger.
func (l logger) Info(args ...interface{}) {
l.sourced().Info(args...)
}
// Infoln logs a message at level Info on the standard logger.
func (l logger) Infoln(args ...interface{}) {
l.sourced().Infoln(args...)
}
// Infof logs a message at level Info on the standard logger.
func (l logger) Infof(format string, args ...interface{}) {
l.sourced().Infof(format, args...)
}
// Warn logs a message at level Warn on the standard logger.
func (l logger) Warn(args ...interface{}) {
l.sourced().Warn(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func (l logger) Warnln(args ...interface{}) {
l.sourced().Warnln(args...)
}
// Warnf logs a message at level Warn on the standard logger.
func (l logger) Warnf(format string, args ...interface{}) {
l.sourced().Warnf(format, args...)
}
// Error logs a message at level Error on the standard logger.
func (l logger) Error(args ...interface{}) {
l.sourced().Error(args...)
}
// Errorln logs a message at level Error on the standard logger.
func (l logger) Errorln(args ...interface{}) {
l.sourced().Errorln(args...)
}
// Errorf logs a message at level Error on the standard logger.
func (l logger) Errorf(format string, args ...interface{}) {
l.sourced().Errorf(format, args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func (l logger) Fatal(args ...interface{}) {
l.sourced().Fatal(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func (l logger) Fatalln(args ...interface{}) {
l.sourced().Fatalln(args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func (l logger) Fatalf(format string, args ...interface{}) {
l.sourced().Fatalf(format, args...)
}
// sourced adds a source field to the logger that contains
// the file name and line where the logging happened.
func (l logger) sourced() *logrus.Entry {
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "<???>"
line = 1
} else {
slash := strings.LastIndex(file, "/")
file = file[slash+1:]
}
return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
}
var origLogger = logrus.New()
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
// Base returns the default Logger.
func Base() Logger {
return baseLogger
}
// NewLogger returns a new Logger logging to w.
func NewLogger(w io.Writer) Logger {
l := logrus.New()
l.Out = w
return logger{entry: logrus.NewEntry(l)}
}
// NewNopLogger returns a logger that discards all log messages.
func NewNopLogger() Logger {
l := logrus.New()
l.Out = ioutil.Discard
return logger{entry: logrus.NewEntry(l)}
}
// With adds a field to the logger.
func With(key string, value interface{}) Logger {
return baseLogger.With(key, value)
}
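
A short sketch of field-scoped logging with With; the field names and timing are illustrative:

package main

import (
	"time"

	"github.com/prometheus/common/log"
)

func main() {
	start := time.Now()
	// Each With call returns a new Logger carrying the extra field, so
	// request-scoped fields can be layered without mutating the base logger.
	reqLogger := log.With("component", "api").With("path", "/metrics")
	reqLogger.Infof("request served in %v", time.Since(start))
}
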
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
baseLogger.sourced().Debug(args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
baseLogger.sourced().Debugln(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
baseLogger.sourced().Debugf(format, args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
baseLogger.sourced().Info(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
baseLogger.sourced().Infoln(args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
baseLogger.sourced().Infof(format, args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
baseLogger.sourced().Warn(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
baseLogger.sourced().Warnln(args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
baseLogger.sourced().Warnf(format, args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
baseLogger.sourced().Error(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
baseLogger.sourced().Errorln(args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
baseLogger.sourced().Errorf(format, args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
baseLogger.sourced().Fatal(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
baseLogger.sourced().Fatalln(args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
baseLogger.sourced().Fatalf(format, args...)
}
type errorLogWriter struct{}
func (errorLogWriter) Write(b []byte) (int, error) {
baseLogger.sourced().Error(string(b))
return len(b), nil
}
// NewErrorLogger returns a log.Logger that is meant to be used
// in the ErrorLog field of an http.Server to log HTTP server errors.
func NewErrorLogger() *log.Logger {
return log.New(&errorLogWriter{}, "", 0)
}
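
A sketch of plugging NewErrorLogger into an http.Server so server-internal errors surface through this package; the address is arbitrary:

package main

import (
	"net/http"

	"github.com/prometheus/common/log"
)

func main() {
	srv := &http.Server{
		Addr: ":8080",
		// TLS handshake failures and similar server-internal errors are
		// forwarded to the package logger at Error level.
		ErrorLog: log.NewErrorLogger(),
	}
	log.Fatal(srv.ListenAndServe())
}
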


@ -1,39 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"bytes"
"regexp"
"testing"
"github.com/Sirupsen/logrus"
)
func TestFileLineLogging(t *testing.T) {
var buf bytes.Buffer
origLogger.Out = &buf
origLogger.Formatter = &logrus.TextFormatter{
DisableColors: true,
}
// The default logging level should be "info".
Debug("This debug-level line should not show up in the output.")
Infof("This %s-level line should show up in the output.", "info")
re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33" \n$`
if !regexp.MustCompile(re).Match(buf.Bytes()) {
t.Fatalf("%q did not match expected regex %q", buf.String(), re)
}
}


@ -1,126 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!nacl,!plan9

package log
import (
"fmt"
"log/syslog"
"os"
"github.com/Sirupsen/logrus"
)
var _ logrus.Formatter = (*syslogger)(nil)
func init() {
setSyslogFormatter = func(appname, local string) error {
if appname == "" {
return fmt.Errorf("missing appname parameter")
}
if local == "" {
return fmt.Errorf("missing local parameter")
}
fmter, err := newSyslogger(appname, local, origLogger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
origLogger.Errorf("can't connect logger to syslog: %v", err)
return err
}
origLogger.Formatter = fmter
return nil
}
}
var prefixTag []byte
type syslogger struct {
wrap logrus.Formatter
out *syslog.Writer
}
func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) {
priority, err := getFacility(facility)
if err != nil {
return nil, err
}
out, err := syslog.New(priority, appname)
_, isJSON := fmter.(*logrus.JSONFormatter)
if isJSON {
// add cee tag to json formatted syslogs
prefixTag = []byte("@cee:")
}
return &syslogger{
out: out,
wrap: fmter,
}, err
}
func getFacility(facility string) (syslog.Priority, error) {
switch facility {
case "0":
return syslog.LOG_LOCAL0, nil
case "1":
return syslog.LOG_LOCAL1, nil
case "2":
return syslog.LOG_LOCAL2, nil
case "3":
return syslog.LOG_LOCAL3, nil
case "4":
return syslog.LOG_LOCAL4, nil
case "5":
return syslog.LOG_LOCAL5, nil
case "6":
return syslog.LOG_LOCAL6, nil
case "7":
return syslog.LOG_LOCAL7, nil
}
return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility)
}
func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
data, err := s.wrap.Format(e)
if err != nil {
fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err)
return data, err
}
// only append tag to data sent to syslog (line), not to what
// is returned
line := string(append(prefixTag, data...))
switch e.Level {
case logrus.PanicLevel:
err = s.out.Crit(line)
case logrus.FatalLevel:
err = s.out.Crit(line)
case logrus.ErrorLevel:
err = s.out.Err(line)
case logrus.WarnLevel:
err = s.out.Warning(line)
case logrus.InfoLevel:
err = s.out.Info(line)
case logrus.DebugLevel:
err = s.out.Debug(line)
default:
err = s.out.Notice(line)
}
if err != nil {
fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err)
}
return data, err
}
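
A minimal sketch of switching the logger to this syslog target at runtime; the appname "bob" and facility 7 mirror the -log.format help text in log.go and are otherwise arbitrary:

package main

import (
	"flag"

	"github.com/prometheus/common/log"
)

func main() {
	// Programmatic equivalent of -log.format "logger:syslog?appname=bob&local=7".
	if err := flag.Set("log.format", "logger:syslog?appname=bob&local=7"); err != nil {
		log.Fatalf("enabling syslog: %v", err)
	}
	log.Warnln("delivered to syslog with Warning priority on LOG_LOCAL7")
}
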


@ -1,52 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!nacl,!plan9

package log
import (
"errors"
"log/syslog"
"testing"
)
func TestGetFacility(t *testing.T) {
testCases := []struct {
facility string
expectedPriority syslog.Priority
expectedErr error
}{
{"0", syslog.LOG_LOCAL0, nil},
{"1", syslog.LOG_LOCAL1, nil},
{"2", syslog.LOG_LOCAL2, nil},
{"3", syslog.LOG_LOCAL3, nil},
{"4", syslog.LOG_LOCAL4, nil},
{"5", syslog.LOG_LOCAL5, nil},
{"6", syslog.LOG_LOCAL6, nil},
{"7", syslog.LOG_LOCAL7, nil},
{"8", syslog.LOG_LOCAL0, errors.New("invalid local(8) for syslog")},
}
for _, tc := range testCases {
priority, err := getFacility(tc.facility)
if err != tc.expectedErr {
// Distinct error values can carry the same message, so compare
// messages, guarding against a nil error on either side.
if err == nil || tc.expectedErr == nil || err.Error() != tc.expectedErr.Error() {
t.Errorf("want %v, got %v", tc.expectedErr, err)
}
}
if priority != tc.expectedPriority {
t.Errorf("want %q, got %q", tc.expectedPriority, priority)
}
}
}


@ -1,100 +0,0 @@
package route
import (
"net/http"
"github.com/julienschmidt/httprouter"
"golang.org/x/net/context"
)
type param string
// Param returns param p for the context.
func Param(ctx context.Context, p string) string {
return ctx.Value(param(p)).(string)
}
// WithParam returns a new context with param p set to v.
func WithParam(ctx context.Context, p, v string) context.Context {
return context.WithValue(ctx, param(p), v)
}
// Router wraps httprouter.Router and adds support for prefixed sub-routers
// and per-request context injections.
type Router struct {
rtr *httprouter.Router
prefix string
}
// New returns a new Router.
func New() *Router {
return &Router{
rtr: httprouter.New(),
}
}
// WithPrefix returns a router that prefixes all registered routes with prefix.
func (r *Router) WithPrefix(prefix string) *Router {
return &Router{rtr: r.rtr, prefix: r.prefix + prefix}
}
// handle turns a HandlerFunc into an httprouter.Handle.
func (r *Router) handle(h http.HandlerFunc) httprouter.Handle {
return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
for _, p := range params {
ctx = context.WithValue(ctx, param(p.Key), p.Value)
}
h(w, req.WithContext(ctx))
}
}
// Get registers a new GET route.
func (r *Router) Get(path string, h http.HandlerFunc) {
r.rtr.GET(r.prefix+path, r.handle(h))
}
// Options registers a new OPTIONS route.
func (r *Router) Options(path string, h http.HandlerFunc) {
r.rtr.OPTIONS(r.prefix+path, r.handle(h))
}
// Del registers a new DELETE route.
func (r *Router) Del(path string, h http.HandlerFunc) {
r.rtr.DELETE(r.prefix+path, r.handle(h))
}
// Put registers a new PUT route.
func (r *Router) Put(path string, h http.HandlerFunc) {
r.rtr.PUT(r.prefix+path, r.handle(h))
}
// Post registers a new POST route.
func (r *Router) Post(path string, h http.HandlerFunc) {
r.rtr.POST(r.prefix+path, r.handle(h))
}
// Redirect takes an absolute path and sends an internal HTTP redirect for it,
// prefixed by the router's path prefix. Note that this method does not include
// functionality for handling relative paths or full URL redirects.
func (r *Router) Redirect(w http.ResponseWriter, req *http.Request, path string, code int) {
http.Redirect(w, req, r.prefix+path, code)
}
// ServeHTTP implements http.Handler.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
r.rtr.ServeHTTP(w, req)
}
// FileServe returns a new http.HandlerFunc that serves files from dir.
// Routes using it must provide the catch-all *filepath parameter.
func FileServe(dir string) http.HandlerFunc {
fs := http.FileServer(http.Dir(dir))
return func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = Param(r.Context(), "filepath")
fs.ServeHTTP(w, r)
}
}
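
A minimal sketch tying the pieces together: a prefixed sub-router with a path parameter, plus static files via FileServe. The import path github.com/prometheus/common/route, the handler body, and the ./assets directory are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/common/route"
)

func main() {
	rtr := route.New()

	api := rtr.WithPrefix("/api/v1")
	api.Get("/targets/:name", func(w http.ResponseWriter, r *http.Request) {
		// handle() copied the :name path parameter into the request context.
		w.Write([]byte("target: " + route.Param(r.Context(), "name")))
	})

	// FileServe requires the catch-all *filepath parameter on its route.
	rtr.Get("/static/*filepath", route.FileServe("./assets"))

	if err := http.ListenAndServe(":8080", rtr); err != nil {
		panic(err)
	}
}
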


@ -1,44 +0,0 @@
package route
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestRedirect(t *testing.T) {
router := New().WithPrefix("/test/prefix")
w := httptest.NewRecorder()
r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil)
if err != nil {
t.Fatalf("Error building test request: %s", err)
}
router.Redirect(w, r, "/some/endpoint", http.StatusFound)
if w.Code != http.StatusFound {
t.Fatalf("Unexpected redirect status code: got %d, want %d", w.Code, http.StatusFound)
}
want := "/test/prefix/some/endpoint"
got := w.Header()["Location"][0]
if want != got {
t.Fatalf("Unexpected redirect location: got %s, want %s", got, want)
}
}
func TestContext(t *testing.T) {
router := New()
router.Get("/test/:foo/", func(w http.ResponseWriter, r *http.Request) {
want := "bar"
got := Param(r.Context(), "foo")
if want != got {
t.Fatalf("Unexpected context value: want %q, got %q", want, got)
}
})
r, err := http.NewRequest("GET", "http://localhost:9090/test/bar/", nil)
if err != nil {
t.Fatalf("Error building test request: %s", err)
}
router.ServeHTTP(nil, r)
}


@ -1,89 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import (
"bytes"
"fmt"
"runtime"
"strings"
"text/template"
"github.com/prometheus/client_golang/prometheus"
)
// Build information. Populated at build-time.
var (
Version string
Revision string
Branch string
BuildUser string
BuildDate string
GoVersion = runtime.Version()
)
// NewCollector returns a collector which exports metrics about current version information.
func NewCollector(program string) *prometheus.GaugeVec {
buildInfo := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: program,
Name: "build_info",
Help: fmt.Sprintf(
"A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
program,
),
},
[]string{"version", "revision", "branch", "goversion"},
)
buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1)
return buildInfo
}
// versionInfoTmpl contains the template used by Print.
var versionInfoTmpl = `
{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
build user: {{.buildUser}}
build date: {{.buildDate}}
go version: {{.goVersion}}
`
// Print returns version information.
func Print(program string) string {
m := map[string]string{
"program": program,
"version": Version,
"revision": Revision,
"branch": Branch,
"buildUser": BuildUser,
"buildDate": BuildDate,
"goVersion": GoVersion,
}
t := template.Must(template.New("version").Parse(versionInfoTmpl))
var buf bytes.Buffer
if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
panic(err)
}
return strings.TrimSpace(buf.String())
}
// Info returns version, branch and revision information.
func Info() string {
return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
}
// BuildContext returns goVersion, buildUser and buildDate information.
func BuildContext() string {
return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
}
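
A minimal sketch of consuming this package: the build variables are stamped via -ldflags, Print renders the template above, and NewCollector exposes the build_info metric. The program name "myapp" and the version string are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/version"
)

func main() {
	// The variables above are typically stamped at build time, e.g.:
	//   go build -ldflags "-X github.com/prometheus/common/version.Version=1.2.3"
	fmt.Println(version.Print("myapp"))

	// Exposes myapp_build_info{version=...,revision=...,branch=...,goversion=...} 1.
	prometheus.MustRegister(version.NewCollector("myapp"))
}
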



@ -1 +0,0 @@
vim


@ -1,7 +0,0 @@
rchar: 750339
wchar: 818609
syscr: 7405
syscw: 5245
read_bytes: 1024
write_bytes: 2048
cancelled_write_bytes: -1024


@ -1,17 +0,0 @@
Limit Soft Limit Hard Limit Units
Max cpu time unlimited unlimited seconds
Max file size unlimited unlimited bytes
Max data size unlimited unlimited bytes
Max stack size 8388608 unlimited bytes
Max core file size 0 unlimited bytes
Max resident set unlimited unlimited bytes
Max processes 62898 62898 processes
Max open files 2048 4096 files
Max locked memory 65536 65536 bytes
Max address space unlimited unlimited bytes
Max file locks unlimited unlimited locks
Max pending signals 62898 62898 signals
Max msgqueue size 819200 819200 bytes
Max nice priority 0 0
Max realtime priority 0 0
Max realtime timeout unlimited unlimited us


@ -1,19 +0,0 @@
device rootfs mounted on / with fstype rootfs
device sysfs mounted on /sys with fstype sysfs
device proc mounted on /proc with fstype proc
device /dev/sda1 mounted on / with fstype ext4
device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
age: 13968
caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
sec: flavor=1,pseudoflavor=1
events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
bytes: 1207640230 0 0 0 1210214218 0 295483 0
RPC iostats version: 1.0 p/v: 100003/4 (nfs)
xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
per-op statistics
NULL: 0 0 0 0 0 0 0 0
READ: 1298 1298 0 207680 1210292152 6 79386 79407
WRITE: 0 0 0 0 0 0 0 0


@ -1 +0,0 @@
26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0


@ -1 +0,0 @@
ata_sff


@ -1,17 +0,0 @@
Limit Soft Limit Hard Limit Units
Max cpu time unlimited unlimited seconds
Max file size unlimited unlimited bytes
Max data size unlimited unlimited bytes
Max stack size 8388608 unlimited bytes
Max core file size 0 unlimited bytes
Max resident set unlimited unlimited bytes
Max processes 29436 29436 processes
Max open files 1024 4096 files
Max locked memory 65536 65536 bytes
Max address space unlimited unlimited bytes
Max file locks unlimited unlimited locks
Max pending signals 29436 29436 signals
Max msgqueue size 819200 819200 bytes
Max nice priority 0 0
Max realtime priority 0 0
Max realtime timeout unlimited unlimited us


@ -1 +0,0 @@
33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0


@ -1,2 +0,0 @@
1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
#!/bin/cat /proc/self/stat


@ -1,3 +0,0 @@
Node 0, zone
Node 0, zone
Node 0, zone


@ -1,3 +0,0 @@
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0


@ -1,3 +0,0 @@
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0


@ -1,23 +0,0 @@
extent_alloc 92447 97589 92448 93751
abt 0 0 0 0
blk_map 1767055 188820 184891 92447 92448 2140766 0
bmbt 0 0 0 0
dir 185039 92447 92444 136422
trans 706 944304 0
ig 185045 58807 0 126238 0 33637 22
log 2883 113448 9 17360 739
push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
xstrat 92447 0
rw 107739 94045
attr 4 0 0 0
icluster 8677 7849 135802
vnodes 92601 0 0 0 92444 92444 92444 0
buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
qm 0 0 0 0 0 0 0 0
xpc 399724544 92823103 86219234
debug 0


@ -1,26 +0,0 @@
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
md127 : active raid1 sdi2[0] sdj2[1]
312319552 blocks [2/2] [UU]
md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
248896 blocks [2/2] [UU]
md4 : inactive raid1 sda3[0] sdb3[1]
4883648 blocks [2/2] [UU]
md6 : active raid1 sdb2[2] sda2[0]
195310144 blocks [2/1] [U_]
[=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
md8 : active raid1 sdb1[1] sda1[0]
195310144 blocks [2/2] [UU]
[=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
bitmap: 0/30 pages [0KB], 65536KB chunk
unused devices: <none>


@ -1,14 +0,0 @@
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP C0A80016:0CEA wlc
-> C0A85216:0CEA Tunnel 100 248 2
-> C0A85318:0CEA Tunnel 100 248 2
-> C0A85315:0CEA Tunnel 100 248 1
TCP C0A80039:0CEA wlc
-> C0A85416:0CEA Tunnel 0 0 0
-> C0A85215:0CEA Tunnel 100 1499 0
-> C0A83215:0CEA Tunnel 100 1498 0
TCP C0A80037:0CEA wlc
-> C0A8321A:0CEA Tunnel 0 0 0
-> C0A83120:0CEA Tunnel 100 0 0


@ -1,6 +0,0 @@
Total Incoming Outgoing Incoming Outgoing
Conns Packets Packets Bytes Bytes
16AA370 E33656E5 0 51D8C8883AB3 0
Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
4 1FB3C 0 1282A8F 0


@ -1,16 +0,0 @@
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
cpu1 47869 23 16474 1110787 591 0 46 0 0 0
cpu2 46504 36 15916 1112321 441 0 326 0 0 0
cpu3 47054 102 15683 1113230 533 0 60 0 0 0
cpu4 28413 25 10776 1140321 217 0 8 0 0 0
cpu5 29271 101 11586 1136270 672 0 30 0 0 0
cpu6 29152 36 10276 1139721 319 0 29 0 0 0
cpu7 29098 268 10164 1139282 555 0 31 0 0 0
intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
ctxt 38014093
btime 1418183276
processes 26442
procs_running 2
procs_blocked 0
softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444


@ -1,2 +0,0 @@
This directory contains some empty files that the symlinks in the "fd" directory point to.
They are otherwise ignored by the tests.


@ -1,16 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sysfs provides functions to retrieve system and kernel metrics
// from the pseudo-filesystem sys.
package sysfs
