remove NewRelic

Signed-off-by: David van der Spek <vanderspek.david@gmail.com>
David van der Spek 2023-08-18 12:16:18 +02:00
parent 97957b12b1
commit 77c33cd243
53 changed files with 1 addition and 4640 deletions

View file

@ -631,8 +631,6 @@ type Ignore struct {
type Reporting struct {
	// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
	Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"`
-	// NewRelic configures error reporting for NewRelic (newrelic.com)
-	NewRelic NewRelicReporting `yaml:"newrelic,omitempty"`
}

// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com).
@ -646,16 +644,6 @@ type BugsnagReporting struct {
	Endpoint string `yaml:"endpoint,omitempty"`
}

-// NewRelicReporting configures error reporting for NewRelic (newrelic.com)
-type NewRelicReporting struct {
-	// LicenseKey is the NewRelic user license key
-	LicenseKey string `yaml:"licensekey,omitempty"`
-	// Name is the component name of the registry in NewRelic
-	Name string `yaml:"name,omitempty"`
-	// Verbose configures debug output to STDOUT
-	Verbose bool `yaml:"verbose,omitempty"`
-}
-
// Middleware configures named middlewares to be applied at injection points.
type Middleware struct {
	// Name the middleware registers itself as
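
The net effect on the configuration types: with the `NewRelicReporting` type and the `NewRelic` field gone, error reporting is configured through Bugsnag alone. The surviving struct, assembled from the context lines of the hunk above (a reference sketch, not part of the diff):

```go
// Reporting as it stands after this commit: only Bugsnag remains.
type Reporting struct {
	// Bugsnag configures error reporting for Bugsnag (bugsnag.com).
	Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"`
}
```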

View file

@ -453,13 +453,9 @@ func (suite *ConfigSuite) TestParseInvalidLoglevel(c *check.C) {
func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *check.C) {
	suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey"
	suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080"
-	suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey"
-	suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME"

	os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey")
	os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")
-	os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
-	os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	c.Assert(err, check.IsNil)
@ -485,8 +481,6 @@ func (suite *ConfigSuite) TestParseExtraneousVars(c *check.C) {
	os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080")

	// Environment variables which shouldn't set config items
-	os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey")
-	os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME")
	os.Setenv("REGISTRY_DUCKS", "quack")
	os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk")
@ -619,7 +613,6 @@ func copyConfig(config Configuration) *Configuration {
	}

	configCopy.Reporting = Reporting{
		Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint},
-		NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose},
	}
	configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}

View file

@ -197,10 +197,6 @@ reporting:
    apikey: bugsnagapikey
    releasestage: bugsnagreleasestage
    endpoint: bugsnagendpoint
-  newrelic:
-    licensekey: newreliclicensekey
-    name: newrelicname
-    verbose: true
http:
  addr: localhost:5000
  prefix: /my/nested/registry/
@ -711,17 +707,12 @@ reporting:
    apikey: bugsnagapikey
    releasestage: bugsnagreleasestage
    endpoint: bugsnagendpoint
-  newrelic:
-    licensekey: newreliclicensekey
-    name: newrelicname
-    verbose: true
```

The `reporting` option is **optional** and configures error and metrics
reporting tools. At the moment only two services are supported:

- [Bugsnag](#bugsnag)
-- [New Relic](#new-relic)

A valid configuration may contain both.
@ -733,14 +724,6 @@ A valid configuration may contain both.
| `releasestage` | no | Tracks where the registry is deployed, using a string like `production`, `staging`, or `development`.|
| `endpoint`| no | The enterprise Bugsnag endpoint. |
-
-### `newrelic`
-
-| Parameter | Required | Description |
-|-----------|----------|-------------------------------------------------------|
-| `licensekey` | yes | License key provided by New Relic. |
-| `name` | no | New Relic application name. |
-| `verbose`| no | Set to `true` to enable New Relic debugging output on `stdout`. |
## `http`

```none

go.mod
View file

@ -26,7 +26,6 @@ require (
	github.com/prometheus/client_golang v1.12.1 // indirect; updated to latest
	github.com/sirupsen/logrus v1.8.1
	github.com/spf13/cobra v1.6.1
-	github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50
	golang.org/x/crypto v0.7.0
	golang.org/x/oauth2 v0.6.0
	google.golang.org/api v0.114.0
@ -62,8 +61,6 @@ require (
	github.com/prometheus/common v0.32.1 // indirect
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
-	github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
	go.opencensus.io v0.24.0 // indirect
	golang.org/x/net v0.8.0 // indirect; updated for CVE-2022-27664, CVE-2022-41717
	golang.org/x/sys v0.6.0 // indirect

go.sum
View file

@ -308,12 +308,6 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=

View file

@ -20,7 +20,6 @@ import (
	gorhandlers "github.com/gorilla/handlers"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
-	"github.com/yvasiyarov/gorelic"

	"golang.org/x/crypto/acme"
	"golang.org/x/crypto/acme/autocert"
@ -353,19 +352,6 @@ func configureReporting(app *handlers.App) http.Handler {
		handler = bugsnag.Handler(handler)
	}

-	if app.Config.Reporting.NewRelic.LicenseKey != "" {
-		agent := gorelic.NewAgent()
-		agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
-		if app.Config.Reporting.NewRelic.Name != "" {
-			agent.NewrelicName = app.Config.Reporting.NewRelic.Name
-		}
-		agent.CollectHTTPStat = true
-		agent.Verbose = app.Config.Reporting.NewRelic.Verbose
-		agent.Run()
-		handler = agent.WrapHTTPHandler(handler)
-	}
-
	return handler
}
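
With the gorelic branch removed, `configureReporting` only wraps the handler for Bugsnag. A rough sketch of the remaining function is below; only the `bugsnag.Handler` call and the `return` appear in the diff context, so the Bugsnag guard and the initial `handler` value are assumptions about the surrounding code:

```go
// Sketch of configureReporting after this commit (assumed shape).
func configureReporting(app *handlers.App) http.Handler {
	var handler http.Handler = app

	if app.Config.Reporting.Bugsnag.APIKey != "" {
		// Bugsnag agent configuration elided; see the lines above this hunk.
		handler = bugsnag.Handler(handler)
	}

	return handler
}
```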

View file

@ -1,9 +0,0 @@
*.[68]
*.a
*.out
*.swp
_obj
_testmain.go
cmd/metrics-bench/metrics-bench
cmd/metrics-example/metrics-example
cmd/never-read/never-read

View file

@ -1,29 +0,0 @@
Copyright 2012 Richard Crowley. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of Richard Crowley.

View file

@ -1,104 +0,0 @@
go-metrics
==========
Go port of Coda Hale's Metrics library: <https://github.com/codahale/metrics>.
Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
Usage
-----
Create and update metrics:
```go
c := metrics.NewCounter()
metrics.Register("foo", c)
c.Inc(47)
g := metrics.NewGauge()
metrics.Register("bar", g)
g.Update(47)
s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
h := metrics.NewHistogram(s)
metrics.Register("baz", h)
h.Update(47)
m := metrics.NewMeter()
metrics.Register("quux", m)
m.Mark(47)
t := metrics.NewTimer()
metrics.Register("bang", t)
t.Time(func() {})
t.Update(47)
```
Periodically log every metric in human-readable form to standard error:
```go
go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
```
Periodically log every metric in slightly-more-parseable form to syslog:
```go
w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
```
Periodically emit every metric to Graphite:
```go
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
```
Periodically emit every metric into InfluxDB:
```go
import "github.com/rcrowley/go-metrics/influxdb"
go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
Host: "127.0.0.1:8086",
Database: "metrics",
Username: "test",
Password: "test",
})
```
Periodically upload every metric to Librato:
```go
import "github.com/rcrowley/go-metrics/librato"
go librato.Librato(metrics.DefaultRegistry,
10e9, // interval
"example@example.com", // account owner email address
"token", // Librato API token
"hostname", // source
[]float64{0.95}, // percentiles to send
time.Millisecond, // time unit
)
```
Periodically emit every metric to StatHat:
```go
import "github.com/rcrowley/go-metrics/stathat"
go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
```
Installation
------------
```sh
go get github.com/rcrowley/go-metrics
```
StatHat support additionally requires their Go client:
```sh
go get github.com/stathat/go
```

View file

@ -1,112 +0,0 @@
package metrics
import "sync/atomic"
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
Clear()
Count() int64
Dec(int64)
Inc(int64)
Snapshot() Counter
}
// GetOrRegisterCounter returns an existing Counter or constructs and registers
// a new StandardCounter.
func GetOrRegisterCounter(name string, r Registry) Counter {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewCounter).(Counter)
}
// NewCounter constructs a new StandardCounter.
func NewCounter() Counter {
if UseNilMetrics {
return NilCounter{}
}
return &StandardCounter{0}
}
// NewRegisteredCounter constructs and registers a new StandardCounter.
func NewRegisteredCounter(name string, r Registry) Counter {
c := NewCounter()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// CounterSnapshot is a read-only copy of another Counter.
type CounterSnapshot int64
// Clear panics.
func (CounterSnapshot) Clear() {
panic("Clear called on a CounterSnapshot")
}
// Count returns the count at the time the snapshot was taken.
func (c CounterSnapshot) Count() int64 { return int64(c) }
// Dec panics.
func (CounterSnapshot) Dec(int64) {
panic("Dec called on a CounterSnapshot")
}
// Inc panics.
func (CounterSnapshot) Inc(int64) {
panic("Inc called on a CounterSnapshot")
}
// Snapshot returns the snapshot.
func (c CounterSnapshot) Snapshot() Counter { return c }
// NilCounter is a no-op Counter.
type NilCounter struct{}
// Clear is a no-op.
func (NilCounter) Clear() {}
// Count is a no-op.
func (NilCounter) Count() int64 { return 0 }
// Dec is a no-op.
func (NilCounter) Dec(i int64) {}
// Inc is a no-op.
func (NilCounter) Inc(i int64) {}
// Snapshot is a no-op.
func (NilCounter) Snapshot() Counter { return NilCounter{} }
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
count int64
}
// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
atomic.StoreInt64(&c.count, 0)
}
// Count returns the current count.
func (c *StandardCounter) Count() int64 {
return atomic.LoadInt64(&c.count)
}
// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
atomic.AddInt64(&c.count, -i)
}
// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
atomic.AddInt64(&c.count, i)
}
// Snapshot returns a read-only copy of the counter.
func (c *StandardCounter) Snapshot() Counter {
return CounterSnapshot(c.Count())
}

View file

@ -1,76 +0,0 @@
package metrics
import (
"runtime/debug"
"time"
)
var (
debugMetrics struct {
GCStats struct {
LastGC Gauge
NumGC Gauge
Pause Histogram
//PauseQuantiles Histogram
PauseTotal Gauge
}
ReadGCStats Timer
}
gcStats debug.GCStats
)
// Capture new values for the Go garbage collector statistics exported in
// debug.GCStats. This is designed to be called as a goroutine.
func CaptureDebugGCStats(r Registry, d time.Duration) {
for _ = range time.Tick(d) {
CaptureDebugGCStatsOnce(r)
}
}
// Capture new values for the Go garbage collector statistics exported in
// debug.GCStats. This is designed to be called in a background goroutine.
// Giving a registry which has not been given to RegisterDebugGCStats will
// panic.
//
// Be careful (but much less so) with this because debug.ReadGCStats calls
// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
// operation, isn't something you want to be doing all the time.
func CaptureDebugGCStatsOnce(r Registry) {
lastGC := gcStats.LastGC
t := time.Now()
debug.ReadGCStats(&gcStats)
debugMetrics.ReadGCStats.UpdateSince(t)
debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
}
//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
}
// Register metrics for the Go garbage collector statistics exported in
// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
// i.e. debug.GCStats.PauseTotal.
func RegisterDebugGCStats(r Registry) {
debugMetrics.GCStats.LastGC = NewGauge()
debugMetrics.GCStats.NumGC = NewGauge()
debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
debugMetrics.GCStats.PauseTotal = NewGauge()
debugMetrics.ReadGCStats = NewTimer()
r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
}
// Allocate an initial slice for gcStats.Pause to avoid allocations during
// normal operation.
func init() {
gcStats.Pause = make([]time.Duration, 11)
}

View file

@ -1,118 +0,0 @@
package metrics
import (
"math"
"sync"
"sync/atomic"
)
// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
Rate() float64
Snapshot() EWMA
Tick()
Update(int64)
}
// NewEWMA constructs a new EWMA with the given alpha.
func NewEWMA(alpha float64) EWMA {
if UseNilMetrics {
return NilEWMA{}
}
return &StandardEWMA{alpha: alpha}
}
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
func NewEWMA1() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/1))
}
// NewEWMA5 constructs a new EWMA for a five-minute moving average.
func NewEWMA5() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/5))
}
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
func NewEWMA15() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}
// EWMASnapshot is a read-only copy of another EWMA.
type EWMASnapshot float64
// Rate returns the rate of events per second at the time the snapshot was
// taken.
func (a EWMASnapshot) Rate() float64 { return float64(a) }
// Snapshot returns the snapshot.
func (a EWMASnapshot) Snapshot() EWMA { return a }
// Tick panics.
func (EWMASnapshot) Tick() {
panic("Tick called on an EWMASnapshot")
}
// Update panics.
func (EWMASnapshot) Update(int64) {
panic("Update called on an EWMASnapshot")
}
// NilEWMA is a no-op EWMA.
type NilEWMA struct{}
// Rate is a no-op.
func (NilEWMA) Rate() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
// Tick is a no-op.
func (NilEWMA) Tick() {}
// Update is a no-op.
func (NilEWMA) Update(n int64) {}
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
alpha float64
rate float64
init bool
mutex sync.Mutex
}
// Rate returns the moving average rate of events per second.
func (a *StandardEWMA) Rate() float64 {
a.mutex.Lock()
defer a.mutex.Unlock()
return a.rate * float64(1e9)
}
// Snapshot returns a read-only copy of the EWMA.
func (a *StandardEWMA) Snapshot() EWMA {
return EWMASnapshot(a.Rate())
}
// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
count := atomic.LoadInt64(&a.uncounted)
atomic.AddInt64(&a.uncounted, -count)
instantRate := float64(count) / float64(5e9)
a.mutex.Lock()
defer a.mutex.Unlock()
if a.init {
a.rate += a.alpha * (instantRate - a.rate)
} else {
a.init = true
a.rate = instantRate
}
}
// Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) {
atomic.AddInt64(&a.uncounted, n)
}

View file

@ -1,84 +0,0 @@
package metrics
import "sync/atomic"
// Gauges hold an int64 value that can be set arbitrarily.
type Gauge interface {
Snapshot() Gauge
Update(int64)
Value() int64
}
// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
// new StandardGauge.
func GetOrRegisterGauge(name string, r Registry) Gauge {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewGauge).(Gauge)
}
// NewGauge constructs a new StandardGauge.
func NewGauge() Gauge {
if UseNilMetrics {
return NilGauge{}
}
return &StandardGauge{0}
}
// NewRegisteredGauge constructs and registers a new StandardGauge.
func NewRegisteredGauge(name string, r Registry) Gauge {
c := NewGauge()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeSnapshot is a read-only copy of another Gauge.
type GaugeSnapshot int64
// Snapshot returns the snapshot.
func (g GaugeSnapshot) Snapshot() Gauge { return g }
// Update panics.
func (GaugeSnapshot) Update(int64) {
panic("Update called on a GaugeSnapshot")
}
// Value returns the value at the time the snapshot was taken.
func (g GaugeSnapshot) Value() int64 { return int64(g) }
// NilGauge is a no-op Gauge.
type NilGauge struct{}
// Snapshot is a no-op.
func (NilGauge) Snapshot() Gauge { return NilGauge{} }
// Update is a no-op.
func (NilGauge) Update(v int64) {}
// Value is a no-op.
func (NilGauge) Value() int64 { return 0 }
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
type StandardGauge struct {
value int64
}
// Snapshot returns a read-only copy of the gauge.
func (g *StandardGauge) Snapshot() Gauge {
return GaugeSnapshot(g.Value())
}
// Update updates the gauge's value.
func (g *StandardGauge) Update(v int64) {
atomic.StoreInt64(&g.value, v)
}
// Value returns the gauge's current value.
func (g *StandardGauge) Value() int64 {
return atomic.LoadInt64(&g.value)
}

View file

@ -1,91 +0,0 @@
package metrics
import "sync"
// GaugeFloat64s hold a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
Snapshot() GaugeFloat64
Update(float64)
Value() float64
}
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
// new StandardGaugeFloat64.
func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
}
// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
func NewGaugeFloat64() GaugeFloat64 {
if UseNilMetrics {
return NilGaugeFloat64{}
}
return &StandardGaugeFloat64{
value: 0.0,
}
}
// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
c := NewGaugeFloat64()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
type GaugeFloat64Snapshot float64
// Snapshot returns the snapshot.
func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
// Update panics.
func (GaugeFloat64Snapshot) Update(float64) {
panic("Update called on a GaugeFloat64Snapshot")
}
// Value returns the value at the time the snapshot was taken.
func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
// NilGauge is a no-op Gauge.
type NilGaugeFloat64 struct{}
// Snapshot is a no-op.
func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
// Update is a no-op.
func (NilGaugeFloat64) Update(v float64) {}
// Value is a no-op.
func (NilGaugeFloat64) Value() float64 { return 0.0 }
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
// sync.Mutex to manage a single float64 value.
type StandardGaugeFloat64 struct {
mutex sync.Mutex
value float64
}
// Snapshot returns a read-only copy of the gauge.
func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
return GaugeFloat64Snapshot(g.Value())
}
// Update updates the gauge's value.
func (g *StandardGaugeFloat64) Update(v float64) {
g.mutex.Lock()
defer g.mutex.Unlock()
g.value = v
}
// Value returns the gauge's current value.
func (g *StandardGaugeFloat64) Value() float64 {
g.mutex.Lock()
defer g.mutex.Unlock()
return g.value
}

View file

@ -1,104 +0,0 @@
package metrics
import (
"bufio"
"fmt"
"log"
"net"
"strconv"
"strings"
"time"
)
// GraphiteConfig provides a container with configuration parameters for
// the Graphite exporter
type GraphiteConfig struct {
Addr *net.TCPAddr // Network address to connect to
Registry Registry // Registry to be exported
FlushInterval time.Duration // Flush interval
DurationUnit time.Duration // Time conversion unit for durations
Prefix string // Prefix to be prepended to metric names
Percentiles []float64 // Percentiles to export from timers and histograms
}
// Graphite is a blocking exporter function which reports metrics in r
// to a graphite server located at addr, flushing them every d duration
// and prepending metric names with prefix.
func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
GraphiteWithConfig(GraphiteConfig{
Addr: addr,
Registry: r,
FlushInterval: d,
DurationUnit: time.Nanosecond,
Prefix: prefix,
Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
})
}
// GraphiteWithConfig is a blocking exporter function just like Graphite,
// but it takes a GraphiteConfig instead.
func GraphiteWithConfig(c GraphiteConfig) {
for _ = range time.Tick(c.FlushInterval) {
if err := graphite(&c); nil != err {
log.Println(err)
}
}
}
func graphite(c *GraphiteConfig) error {
now := time.Now().Unix()
du := float64(c.DurationUnit)
conn, err := net.DialTCP("tcp", nil, c.Addr)
if nil != err {
return err
}
defer conn.Close()
w := bufio.NewWriter(conn)
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
case Gauge:
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
case GaugeFloat64:
fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles(c.Percentiles)
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
for psIdx, psKey := range c.Percentiles {
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
}
case Meter:
m := metric.Snapshot()
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
case Timer:
t := metric.Snapshot()
ps := t.Percentiles(c.Percentiles)
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, int64(du)*t.Min(), now)
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, int64(du)*t.Max(), now)
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, du*t.Mean(), now)
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, du*t.StdDev(), now)
for psIdx, psKey := range c.Percentiles {
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
}
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
}
w.Flush()
})
return nil
}

View file

@ -1,61 +0,0 @@
package metrics
// Healthchecks hold an error value describing an arbitrary up/down status.
type Healthcheck interface {
Check()
Error() error
Healthy()
Unhealthy(error)
}
// NewHealthcheck constructs a new Healthcheck which will use the given
// function to update its status.
func NewHealthcheck(f func(Healthcheck)) Healthcheck {
if UseNilMetrics {
return NilHealthcheck{}
}
return &StandardHealthcheck{nil, f}
}
// NilHealthcheck is a no-op.
type NilHealthcheck struct{}
// Check is a no-op.
func (NilHealthcheck) Check() {}
// Error is a no-op.
func (NilHealthcheck) Error() error { return nil }
// Healthy is a no-op.
func (NilHealthcheck) Healthy() {}
// Unhealthy is a no-op.
func (NilHealthcheck) Unhealthy(error) {}
// StandardHealthcheck is the standard implementation of a Healthcheck and
// stores the status and a function to call to update the status.
type StandardHealthcheck struct {
err error
f func(Healthcheck)
}
// Check runs the healthcheck function to update the healthcheck's status.
func (h *StandardHealthcheck) Check() {
h.f(h)
}
// Error returns the healthcheck's status, which will be nil if it is healthy.
func (h *StandardHealthcheck) Error() error {
return h.err
}
// Healthy marks the healthcheck as healthy.
func (h *StandardHealthcheck) Healthy() {
h.err = nil
}
// Unhealthy marks the healthcheck as unhealthy. The error is stored and
// may be retrieved by the Error method.
func (h *StandardHealthcheck) Unhealthy(err error) {
h.err = err
}

View file

@ -1,192 +0,0 @@
package metrics
// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
Clear()
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Sample() Sample
Snapshot() Histogram
StdDev() float64
Update(int64)
Variance() float64
}
// GetOrRegisterHistogram returns an existing Histogram or constructs and
// registers a new StandardHistogram.
func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
}
// NewHistogram constructs a new StandardHistogram from a Sample.
func NewHistogram(s Sample) Histogram {
if UseNilMetrics {
return NilHistogram{}
}
return &StandardHistogram{sample: s}
}
// NewRegisteredHistogram constructs and registers a new StandardHistogram from
// a Sample.
func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
c := NewHistogram(s)
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// HistogramSnapshot is a read-only copy of another Histogram.
type HistogramSnapshot struct {
sample *SampleSnapshot
}
// Clear panics.
func (*HistogramSnapshot) Clear() {
panic("Clear called on a HistogramSnapshot")
}
// Count returns the number of samples recorded at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample at the time the snapshot
// was taken.
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample at the time the snapshot was
// taken.
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) Percentile(p float64) float64 {
return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the sample
// at the time the snapshot was taken.
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
// Snapshot returns the snapshot.
func (h *HistogramSnapshot) Snapshot() Histogram { return h }
// StdDev returns the standard deviation of the values in the sample at the
// time the snapshot was taken.
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
// Update panics.
func (*HistogramSnapshot) Update(int64) {
panic("Update called on a HistogramSnapshot")
}
// Variance returns the variance of inputs at the time the snapshot was taken.
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
// NilHistogram is a no-op Histogram.
type NilHistogram struct{}
// Clear is a no-op.
func (NilHistogram) Clear() {}
// Count is a no-op.
func (NilHistogram) Count() int64 { return 0 }
// Max is a no-op.
func (NilHistogram) Max() int64 { return 0 }
// Mean is a no-op.
func (NilHistogram) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilHistogram) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilHistogram) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Sample is a no-op.
func (NilHistogram) Sample() Sample { return NilSample{} }
// Snapshot is a no-op.
func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
// StdDev is a no-op.
func (NilHistogram) StdDev() float64 { return 0.0 }
// Update is a no-op.
func (NilHistogram) Update(v int64) {}
// Variance is a no-op.
func (NilHistogram) Variance() float64 { return 0.0 }
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
type StandardHistogram struct {
sample Sample
}
// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }
// Count returns the number of samples recorded since the histogram was last
// cleared.
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
// Max returns the maximum value in the sample.
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
// Mean returns the mean of the values in the sample.
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
// Min returns the minimum value in the sample.
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
// Percentile returns an arbitrary percentile of the values in the sample.
func (h *StandardHistogram) Percentile(p float64) float64 {
return h.sample.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
return h.sample.Percentiles(ps)
}
// Sample returns the Sample underlying the histogram.
func (h *StandardHistogram) Sample() Sample { return h.sample }
// Snapshot returns a read-only copy of the histogram.
func (h *StandardHistogram) Snapshot() Histogram {
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
}
// StdDev returns the standard deviation of the values in the sample.
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
// Variance returns the variance of the values in the sample.
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }

View file

@ -1,83 +0,0 @@
package metrics
import (
"encoding/json"
"io"
"time"
)
// MarshalJSON returns a byte slice containing a JSON representation of all
// the metrics in the Registry.
func (r StandardRegistry) MarshalJSON() ([]byte, error) {
data := make(map[string]map[string]interface{})
r.Each(func(name string, i interface{}) {
values := make(map[string]interface{})
switch metric := i.(type) {
case Counter:
values["count"] = metric.Count()
case Gauge:
values["value"] = metric.Value()
case GaugeFloat64:
values["value"] = metric.Value()
case Healthcheck:
values["error"] = nil
metric.Check()
if err := metric.Error(); nil != err {
values["error"] = metric.Error().Error()
}
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
values["count"] = h.Count()
values["min"] = h.Min()
values["max"] = h.Max()
values["mean"] = h.Mean()
values["stddev"] = h.StdDev()
values["median"] = ps[0]
values["75%"] = ps[1]
values["95%"] = ps[2]
values["99%"] = ps[3]
values["99.9%"] = ps[4]
case Meter:
m := metric.Snapshot()
values["count"] = m.Count()
values["1m.rate"] = m.Rate1()
values["5m.rate"] = m.Rate5()
values["15m.rate"] = m.Rate15()
values["mean.rate"] = m.RateMean()
case Timer:
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
values["count"] = t.Count()
values["min"] = t.Min()
values["max"] = t.Max()
values["mean"] = t.Mean()
values["stddev"] = t.StdDev()
values["median"] = ps[0]
values["75%"] = ps[1]
values["95%"] = ps[2]
values["99%"] = ps[3]
values["99.9%"] = ps[4]
values["1m.rate"] = t.Rate1()
values["5m.rate"] = t.Rate5()
values["15m.rate"] = t.Rate15()
values["mean.rate"] = t.RateMean()
}
data[name] = values
})
return json.Marshal(data)
}
// WriteJSON writes metrics from the given registry periodically to the
// specified io.Writer as JSON.
func WriteJSON(r Registry, d time.Duration, w io.Writer) {
for _ = range time.Tick(d) {
WriteJSONOnce(r, w)
}
}
// WriteJSONOnce writes metrics from the given registry to the specified
// io.Writer as JSON.
func WriteJSONOnce(r Registry, w io.Writer) {
json.NewEncoder(w).Encode(r)
}

View file

@ -1,70 +0,0 @@
package metrics
import (
"log"
"time"
)
// Output each metric in the given registry periodically using the given
// logger.
func Log(r Registry, d time.Duration, l *log.Logger) {
for _ = range time.Tick(d) {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
l.Printf("counter %s\n", name)
l.Printf(" count: %9d\n", metric.Count())
case Gauge:
l.Printf("gauge %s\n", name)
l.Printf(" value: %9d\n", metric.Value())
case GaugeFloat64:
l.Printf("gauge %s\n", name)
l.Printf(" value: %f\n", metric.Value())
case Healthcheck:
metric.Check()
l.Printf("healthcheck %s\n", name)
l.Printf(" error: %v\n", metric.Error())
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
l.Printf("histogram %s\n", name)
l.Printf(" count: %9d\n", h.Count())
l.Printf(" min: %9d\n", h.Min())
l.Printf(" max: %9d\n", h.Max())
l.Printf(" mean: %12.2f\n", h.Mean())
l.Printf(" stddev: %12.2f\n", h.StdDev())
l.Printf(" median: %12.2f\n", ps[0])
l.Printf(" 75%%: %12.2f\n", ps[1])
l.Printf(" 95%%: %12.2f\n", ps[2])
l.Printf(" 99%%: %12.2f\n", ps[3])
l.Printf(" 99.9%%: %12.2f\n", ps[4])
case Meter:
m := metric.Snapshot()
l.Printf("meter %s\n", name)
l.Printf(" count: %9d\n", m.Count())
l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
l.Printf(" mean rate: %12.2f\n", m.RateMean())
case Timer:
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
l.Printf("timer %s\n", name)
l.Printf(" count: %9d\n", t.Count())
l.Printf(" min: %9d\n", t.Min())
l.Printf(" max: %9d\n", t.Max())
l.Printf(" mean: %12.2f\n", t.Mean())
l.Printf(" stddev: %12.2f\n", t.StdDev())
l.Printf(" median: %12.2f\n", ps[0])
l.Printf(" 75%%: %12.2f\n", ps[1])
l.Printf(" 95%%: %12.2f\n", ps[2])
l.Printf(" 99%%: %12.2f\n", ps[3])
l.Printf(" 99.9%%: %12.2f\n", ps[4])
l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
l.Printf(" mean rate: %12.2f\n", t.RateMean())
}
})
}
}

View file

@ -1,285 +0,0 @@
Memory usage
============
(Highly unscientific.)
Command used to gather static memory usage:
```sh
grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
```
Program used to gather baseline memory usage:
```go
package main
import "time"
func main() {
time.Sleep(600e9)
}
```
Baseline
--------
```
VmPeak: 42604 kB
VmSize: 42604 kB
VmLck: 0 kB
VmHWM: 1120 kB
VmRSS: 1120 kB
VmData: 35460 kB
VmStk: 136 kB
VmExe: 1020 kB
VmLib: 1848 kB
VmPTE: 36 kB
VmSwap: 0 kB
```
Program used to gather metric memory usage (with other metrics being similar):
```go
package main
import (
"fmt"
"metrics"
"time"
)
func main() {
fmt.Sprintf("foo")
metrics.NewRegistry()
time.Sleep(600e9)
}
```
1000 counters registered
------------------------
```
VmPeak: 44016 kB
VmSize: 44016 kB
VmLck: 0 kB
VmHWM: 1928 kB
VmRSS: 1928 kB
VmData: 36868 kB
VmStk: 136 kB
VmExe: 1024 kB
VmLib: 1848 kB
VmPTE: 40 kB
VmSwap: 0 kB
```
**1.412 kB virtual, TODO 0.808 kB resident per counter.**
100000 counters registered
--------------------------
```
VmPeak: 55024 kB
VmSize: 55024 kB
VmLck: 0 kB
VmHWM: 12440 kB
VmRSS: 12440 kB
VmData: 47876 kB
VmStk: 136 kB
VmExe: 1024 kB
VmLib: 1848 kB
VmPTE: 64 kB
VmSwap: 0 kB
```
**0.1242 kB virtual, 0.1132 kB resident per counter.**
1000 gauges registered
----------------------
```
VmPeak: 44012 kB
VmSize: 44012 kB
VmLck: 0 kB
VmHWM: 1928 kB
VmRSS: 1928 kB
VmData: 36868 kB
VmStk: 136 kB
VmExe: 1020 kB
VmLib: 1848 kB
VmPTE: 40 kB
VmSwap: 0 kB
```
**1.408 kB virtual, 0.808 kB resident per counter.**
100000 gauges registered
------------------------
```
VmPeak: 55020 kB
VmSize: 55020 kB
VmLck: 0 kB
VmHWM: 12432 kB
VmRSS: 12432 kB
VmData: 47876 kB
VmStk: 136 kB
VmExe: 1020 kB
VmLib: 1848 kB
VmPTE: 60 kB
VmSwap: 0 kB
```
**0.12416 kB virtual, 0.11312 resident per gauge.**
1000 histograms with a uniform sample size of 1028
--------------------------------------------------
```
VmPeak: 72272 kB
VmSize: 72272 kB
VmLck: 0 kB
VmHWM: 16204 kB
VmRSS: 16204 kB
VmData: 65100 kB
VmStk: 136 kB
VmExe: 1048 kB
VmLib: 1848 kB
VmPTE: 80 kB
VmSwap: 0 kB
```
**29.668 kB virtual, TODO 15.084 resident per histogram.**
10000 histograms with a uniform sample size of 1028
---------------------------------------------------
```
VmPeak: 256912 kB
VmSize: 256912 kB
VmLck: 0 kB
VmHWM: 146204 kB
VmRSS: 146204 kB
VmData: 249740 kB
VmStk: 136 kB
VmExe: 1048 kB
VmLib: 1848 kB
VmPTE: 448 kB
VmSwap: 0 kB
```
**21.4308 kB virtual, 14.5084 kB resident per histogram.**
50000 histograms with a uniform sample size of 1028
---------------------------------------------------
```
VmPeak: 908112 kB
VmSize: 908112 kB
VmLck: 0 kB
VmHWM: 645832 kB
VmRSS: 645588 kB
VmData: 900940 kB
VmStk: 136 kB
VmExe: 1048 kB
VmLib: 1848 kB
VmPTE: 1716 kB
VmSwap: 1544 kB
```
**17.31016 kB virtual, 12.88936 kB resident per histogram.**
1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
-------------------------------------------------------------------------------------
```
VmPeak: 62480 kB
VmSize: 62480 kB
VmLck: 0 kB
VmHWM: 11572 kB
VmRSS: 11572 kB
VmData: 55308 kB
VmStk: 136 kB
VmExe: 1048 kB
VmLib: 1848 kB
VmPTE: 64 kB
VmSwap: 0 kB
```
**19.876 kB virtual, 10.452 kB resident per histogram.**
10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
--------------------------------------------------------------------------------------
```
VmPeak: 153296 kB
VmSize: 153296 kB
VmLck: 0 kB
VmHWM: 101176 kB
VmRSS: 101176 kB
VmData: 146124 kB
VmStk: 136 kB
VmExe: 1048 kB
VmLib: 1848 kB
VmPTE: 240 kB
VmSwap: 0 kB
```
**11.0692 kB virtual, 10.0056 kB resident per histogram.**
50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
--------------------------------------------------------------------------------------
```
VmPeak: 557264 kB
VmSize: 557264 kB
VmLck: 0 kB
VmHWM: 501056 kB
VmRSS: 501056 kB
VmData: 550092 kB
VmStk: 136 kB
VmExe: 1048 kB
VmLib: 1848 kB
VmPTE: 1032 kB
VmSwap: 0 kB
```
**10.2932 kB virtual, 9.99872 kB resident per histogram.**
1000 meters
-----------
```
VmPeak: 74504 kB
VmSize: 74504 kB
VmLck: 0 kB
VmHWM: 24124 kB
VmRSS: 24124 kB
VmData: 67340 kB
VmStk: 136 kB
VmExe: 1040 kB
VmLib: 1848 kB
VmPTE: 92 kB
VmSwap: 0 kB
```
**31.9 kB virtual, 23.004 kB resident per meter.**
10000 meters
------------
```
VmPeak: 278920 kB
VmSize: 278920 kB
VmLck: 0 kB
VmHWM: 227300 kB
VmRSS: 227300 kB
VmData: 271756 kB
VmStk: 136 kB
VmExe: 1040 kB
VmLib: 1848 kB
VmPTE: 488 kB
VmSwap: 0 kB
```
**23.6316 kB virtual, 22.618 kB resident per meter.**

View file

@ -1,233 +0,0 @@
package metrics
import (
"sync"
"time"
)
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter interface {
Count() int64
Mark(int64)
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
Snapshot() Meter
}
// GetOrRegisterMeter returns an existing Meter or constructs and registers a
// new StandardMeter.
func GetOrRegisterMeter(name string, r Registry) Meter {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewMeter).(Meter)
}
// NewMeter constructs a new StandardMeter and launches a goroutine.
func NewMeter() Meter {
if UseNilMetrics {
return NilMeter{}
}
m := newStandardMeter()
arbiter.Lock()
defer arbiter.Unlock()
arbiter.meters = append(arbiter.meters, m)
if !arbiter.started {
arbiter.started = true
go arbiter.tick()
}
return m
}
// NewMeter constructs and registers a new StandardMeter and launches a
// goroutine.
func NewRegisteredMeter(name string, r Registry) Meter {
c := NewMeter()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// MeterSnapshot is a read-only copy of another Meter.
type MeterSnapshot struct {
count int64
rate1, rate5, rate15, rateMean float64
}
// Count returns the count of events at the time the snapshot was taken.
func (m *MeterSnapshot) Count() int64 { return m.count }
// Mark panics.
func (*MeterSnapshot) Mark(n int64) {
panic("Mark called on a MeterSnapshot")
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
// Snapshot returns the snapshot.
func (m *MeterSnapshot) Snapshot() Meter { return m }
// NilMeter is a no-op Meter.
type NilMeter struct{}
// Count is a no-op.
func (NilMeter) Count() int64 { return 0 }
// Mark is a no-op.
func (NilMeter) Mark(n int64) {}
// Rate1 is a no-op.
func (NilMeter) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilMeter) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilMeter) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilMeter) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilMeter) Snapshot() Meter { return NilMeter{} }
// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
lock sync.RWMutex
snapshot *MeterSnapshot
a1, a5, a15 EWMA
startTime time.Time
}
func newStandardMeter() *StandardMeter {
return &StandardMeter{
snapshot: &MeterSnapshot{},
a1: NewEWMA1(),
a5: NewEWMA5(),
a15: NewEWMA15(),
startTime: time.Now(),
}
}
// Count returns the number of events recorded.
func (m *StandardMeter) Count() int64 {
m.lock.RLock()
count := m.snapshot.count
m.lock.RUnlock()
return count
}
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
m.lock.Lock()
defer m.lock.Unlock()
m.snapshot.count += n
m.a1.Update(n)
m.a5.Update(n)
m.a15.Update(n)
m.updateSnapshot()
}
// Rate1 returns the one-minute moving average rate of events per second.
func (m *StandardMeter) Rate1() float64 {
m.lock.RLock()
rate1 := m.snapshot.rate1
m.lock.RUnlock()
return rate1
}
// Rate5 returns the five-minute moving average rate of events per second.
func (m *StandardMeter) Rate5() float64 {
m.lock.RLock()
rate5 := m.snapshot.rate5
m.lock.RUnlock()
return rate5
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (m *StandardMeter) Rate15() float64 {
m.lock.RLock()
rate15 := m.snapshot.rate15
m.lock.RUnlock()
return rate15
}
// RateMean returns the meter's mean rate of events per second.
func (m *StandardMeter) RateMean() float64 {
m.lock.RLock()
rateMean := m.snapshot.rateMean
m.lock.RUnlock()
return rateMean
}
// Snapshot returns a read-only copy of the meter.
func (m *StandardMeter) Snapshot() Meter {
m.lock.RLock()
snapshot := *m.snapshot
m.lock.RUnlock()
return &snapshot
}
func (m *StandardMeter) updateSnapshot() {
// should run with write lock held on m.lock
snapshot := m.snapshot
snapshot.rate1 = m.a1.Rate()
snapshot.rate5 = m.a5.Rate()
snapshot.rate15 = m.a15.Rate()
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
}
func (m *StandardMeter) tick() {
m.lock.Lock()
defer m.lock.Unlock()
m.a1.Tick()
m.a5.Tick()
m.a15.Tick()
m.updateSnapshot()
}
type meterArbiter struct {
sync.RWMutex
started bool
meters []*StandardMeter
ticker *time.Ticker
}
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
// Ticks meters on the scheduled interval
func (ma *meterArbiter) tick() {
for {
select {
case <-ma.ticker.C:
ma.tickMeters()
}
}
}
func (ma *meterArbiter) tickMeters() {
ma.RLock()
defer ma.RUnlock()
for _, meter := range ma.meters {
meter.tick()
}
}

View file

@ -1,13 +0,0 @@
// Go port of Coda Hale's Metrics library
//
// <https://github.com/rcrowley/go-metrics>
//
// Coda Hale's original work: <https://github.com/codahale/metrics>
package metrics
// UseNilMetrics is checked by the constructor functions for all of the
// standard metrics. If it is true, the metric returned is a stub.
//
// This global kill-switch helps quantify the observer effect and makes
// for less cluttered pprof profiles.
var UseNilMetrics bool = false

View file

@ -1,119 +0,0 @@
package metrics
import (
"bufio"
"fmt"
"log"
"net"
"time"
"os"
"strings"
)
var shortHostName string = ""
// OpenTSDBConfig provides a container with configuration parameters for
// the OpenTSDB exporter
type OpenTSDBConfig struct {
Addr *net.TCPAddr // Network address to connect to
Registry Registry // Registry to be exported
FlushInterval time.Duration // Flush interval
DurationUnit time.Duration // Time conversion unit for durations
Prefix string // Prefix to be prepended to metric names
}
// OpenTSDB is a blocking exporter function which reports metrics in r
// to a TSDB server located at addr, flushing them every d duration
// and prepending metric names with prefix.
func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
OpenTSDBWithConfig(OpenTSDBConfig{
Addr: addr,
Registry: r,
FlushInterval: d,
DurationUnit: time.Nanosecond,
Prefix: prefix,
})
}
// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
// but it takes a OpenTSDBConfig instead.
func OpenTSDBWithConfig(c OpenTSDBConfig) {
for _ = range time.Tick(c.FlushInterval) {
if err := openTSDB(&c); nil != err {
log.Println(err)
}
}
}
func getShortHostname() string {
if shortHostName == "" {
host, _ := os.Hostname()
if index := strings.Index(host, "."); index > 0 {
shortHostName = host[:index]
} else {
shortHostName = host
}
}
return shortHostName
}
func openTSDB(c *OpenTSDBConfig) error {
shortHostname := getShortHostname()
now := time.Now().Unix()
du := float64(c.DurationUnit)
conn, err := net.DialTCP("tcp", nil, c.Addr)
if nil != err {
return err
}
defer conn.Close()
w := bufio.NewWriter(conn)
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
case Gauge:
fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
case GaugeFloat64:
fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
case Meter:
m := metric.Snapshot()
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
case Timer:
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Min(), shortHostname)
fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Max(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, du*t.Mean(), shortHostname)
fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, du*t.StdDev(), shortHostname)
fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[0], shortHostname)
fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[1], shortHostname)
fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[2], shortHostname)
fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[3], shortHostname)
fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[4], shortHostname)
fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
}
w.Flush()
})
return nil
}
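For orientation, the helper above writes one OpenTSDB `put <metric> <timestamp> <value> host=<short-hostname>` line per metric field over a plain TCP connection. A minimal usage sketch follows; the exported `OpenTSDBWithConfig` wrapper and the `FlushInterval` field are assumed from the surrounding package and are not part of this hunk:
```go
package main

import (
	"net"
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	// Assumed wrapper: flushes the registry to OpenTSDB on every tick by
	// calling the openTSDB helper shown above.
	addr, _ := net.ResolveTCPAddr("tcp", "opentsdb.example.com:4242")
	go metrics.OpenTSDBWithConfig(metrics.OpenTSDBConfig{
		Addr:          addr,
		Registry:      metrics.DefaultRegistry,
		FlushInterval: 10 * time.Second,
		DurationUnit:  time.Millisecond,
		Prefix:        "registry",
	})
	time.Sleep(30 * time.Second) // keep the exporter goroutine alive in this toy example
}
```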

View file

@ -1,168 +0,0 @@
package metrics
import (
"fmt"
"reflect"
"sync"
)
// DuplicateMetric is the error returned by Registry.Register when a metric
// already exists. If you mean to Register that metric you must first
// Unregister the existing metric.
type DuplicateMetric string
func (err DuplicateMetric) Error() string {
return fmt.Sprintf("duplicate metric: %s", string(err))
}
// A Registry holds references to a set of metrics by name and can iterate
// over them, calling callback functions provided by the user.
//
// This is an interface so as to encourage other structs to implement
// the Registry API as appropriate.
type Registry interface {
// Call the given function for each registered metric.
Each(func(string, interface{}))
// Get the metric by the given name or nil if none is registered.
Get(string) interface{}
// Gets an existing metric or registers the given one.
// The interface can be the metric to register if not found in registry,
// or a function returning the metric for lazy instantiation.
GetOrRegister(string, interface{}) interface{}
// Register the given metric under the given name.
Register(string, interface{}) error
// Run all registered healthchecks.
RunHealthchecks()
// Unregister the metric with the given name.
Unregister(string)
}
// The standard implementation of a Registry is a mutex-protected map
// of names to metrics.
type StandardRegistry struct {
metrics map[string]interface{}
mutex sync.Mutex
}
// Create a new registry.
func NewRegistry() Registry {
return &StandardRegistry{metrics: make(map[string]interface{})}
}
// Call the given function for each registered metric.
func (r *StandardRegistry) Each(f func(string, interface{})) {
for name, i := range r.registered() {
f(name, i)
}
}
// Get the metric by the given name or nil if none is registered.
func (r *StandardRegistry) Get(name string) interface{} {
r.mutex.Lock()
defer r.mutex.Unlock()
return r.metrics[name]
}
// Gets an existing metric or creates and registers a new one. Threadsafe
// alternative to calling Get and Register on failure.
// The interface can be the metric to register if not found in registry,
// or a function returning the metric for lazy instantiation.
func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
r.mutex.Lock()
defer r.mutex.Unlock()
if metric, ok := r.metrics[name]; ok {
return metric
}
if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
i = v.Call(nil)[0].Interface()
}
r.register(name, i)
return i
}
// Register the given metric under the given name. Returns a DuplicateMetric
// if a metric by the given name is already registered.
func (r *StandardRegistry) Register(name string, i interface{}) error {
r.mutex.Lock()
defer r.mutex.Unlock()
return r.register(name, i)
}
// Run all registered healthchecks.
func (r *StandardRegistry) RunHealthchecks() {
r.mutex.Lock()
defer r.mutex.Unlock()
for _, i := range r.metrics {
if h, ok := i.(Healthcheck); ok {
h.Check()
}
}
}
// Unregister the metric with the given name.
func (r *StandardRegistry) Unregister(name string) {
r.mutex.Lock()
defer r.mutex.Unlock()
delete(r.metrics, name)
}
func (r *StandardRegistry) register(name string, i interface{}) error {
if _, ok := r.metrics[name]; ok {
return DuplicateMetric(name)
}
switch i.(type) {
case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
r.metrics[name] = i
}
return nil
}
func (r *StandardRegistry) registered() map[string]interface{} {
metrics := make(map[string]interface{}, len(r.metrics))
r.mutex.Lock()
defer r.mutex.Unlock()
for name, i := range r.metrics {
metrics[name] = i
}
return metrics
}
var DefaultRegistry Registry = NewRegistry()
// Call the given function for each registered metric.
func Each(f func(string, interface{})) {
DefaultRegistry.Each(f)
}
// Get the metric by the given name or nil if none is registered.
func Get(name string) interface{} {
return DefaultRegistry.Get(name)
}
// Gets an existing metric or creates and registers a new one. Threadsafe
// alternative to calling Get and Register on failure.
func GetOrRegister(name string, i interface{}) interface{} {
return DefaultRegistry.GetOrRegister(name, i)
}
// Register the given metric under the given name. Returns a DuplicateMetric
// if a metric by the given name is already registered.
func Register(name string, i interface{}) error {
return DefaultRegistry.Register(name, i)
}
// Run all registered healthchecks.
func RunHealthchecks() {
DefaultRegistry.RunHealthchecks()
}
// Unregister the metric with the given name.
func Unregister(name string) {
DefaultRegistry.Unregister(name)
}
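A short sketch of the Registry API above, using the Timer constructor defined later in this change (import path taken from the README further down):
```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// GetOrRegister accepts a constructor, so the metric is created lazily.
	t := r.GetOrRegister("api.latency", metrics.NewTimer).(metrics.Timer)
	t.Update(42 * time.Millisecond)

	// Registering the same name twice returns a DuplicateMetric error.
	if err := r.Register("api.latency", metrics.NewTimer()); err != nil {
		fmt.Println(err) // duplicate metric: api.latency
	}
}
```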

View file

@ -1,200 +0,0 @@
package metrics
import (
"runtime"
"time"
)
var (
memStats runtime.MemStats
runtimeMetrics struct {
MemStats struct {
Alloc Gauge
BuckHashSys Gauge
DebugGC Gauge
EnableGC Gauge
Frees Gauge
HeapAlloc Gauge
HeapIdle Gauge
HeapInuse Gauge
HeapObjects Gauge
HeapReleased Gauge
HeapSys Gauge
LastGC Gauge
Lookups Gauge
Mallocs Gauge
MCacheInuse Gauge
MCacheSys Gauge
MSpanInuse Gauge
MSpanSys Gauge
NextGC Gauge
NumGC Gauge
PauseNs Histogram
PauseTotalNs Gauge
StackInuse Gauge
StackSys Gauge
Sys Gauge
TotalAlloc Gauge
}
NumCgoCall Gauge
NumGoroutine Gauge
ReadMemStats Timer
}
frees uint64
lookups uint64
mallocs uint64
numGC uint32
numCgoCalls int64
)
// Capture new values for the Go runtime statistics exported in
// runtime.MemStats. This is designed to be called as a goroutine.
func CaptureRuntimeMemStats(r Registry, d time.Duration) {
for _ = range time.Tick(d) {
CaptureRuntimeMemStatsOnce(r)
}
}
// Capture new values for the Go runtime statistics exported in
// runtime.MemStats. This is designed to be called in a background
// goroutine. Passing a registry which has not been given to
// RegisterRuntimeMemStats will panic.
//
// Be very careful with this because runtime.ReadMemStats calls the C
// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
// and that last one does what it says on the tin.
func CaptureRuntimeMemStatsOnce(r Registry) {
t := time.Now()
runtime.ReadMemStats(&memStats) // This takes 50-200us.
runtimeMetrics.ReadMemStats.UpdateSince(t)
runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
if memStats.DebugGC {
runtimeMetrics.MemStats.DebugGC.Update(1)
} else {
runtimeMetrics.MemStats.DebugGC.Update(0)
}
if memStats.EnableGC {
runtimeMetrics.MemStats.EnableGC.Update(1)
} else {
runtimeMetrics.MemStats.EnableGC.Update(0)
}
runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
i := numGC % uint32(len(memStats.PauseNs))
ii := memStats.NumGC % uint32(len(memStats.PauseNs))
if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
}
} else {
if i > ii {
for ; i < uint32(len(memStats.PauseNs)); i++ {
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
}
i = 0
}
for ; i < ii; i++ {
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
}
}
frees = memStats.Frees
lookups = memStats.Lookups
mallocs = memStats.Mallocs
numGC = memStats.NumGC
runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
currentNumCgoCalls := numCgoCall()
runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
numCgoCalls = currentNumCgoCalls
runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
}
// Register runtimeMetrics for the Go runtime statistics exported in runtime and
// specifically runtime.MemStats. The runtimeMetrics are named by their
// fully-qualified Go symbols, e.g. runtime.MemStats.Alloc.
func RegisterRuntimeMemStats(r Registry) {
runtimeMetrics.MemStats.Alloc = NewGauge()
runtimeMetrics.MemStats.BuckHashSys = NewGauge()
runtimeMetrics.MemStats.DebugGC = NewGauge()
runtimeMetrics.MemStats.EnableGC = NewGauge()
runtimeMetrics.MemStats.Frees = NewGauge()
runtimeMetrics.MemStats.HeapAlloc = NewGauge()
runtimeMetrics.MemStats.HeapIdle = NewGauge()
runtimeMetrics.MemStats.HeapInuse = NewGauge()
runtimeMetrics.MemStats.HeapObjects = NewGauge()
runtimeMetrics.MemStats.HeapReleased = NewGauge()
runtimeMetrics.MemStats.HeapSys = NewGauge()
runtimeMetrics.MemStats.LastGC = NewGauge()
runtimeMetrics.MemStats.Lookups = NewGauge()
runtimeMetrics.MemStats.Mallocs = NewGauge()
runtimeMetrics.MemStats.MCacheInuse = NewGauge()
runtimeMetrics.MemStats.MCacheSys = NewGauge()
runtimeMetrics.MemStats.MSpanInuse = NewGauge()
runtimeMetrics.MemStats.MSpanSys = NewGauge()
runtimeMetrics.MemStats.NextGC = NewGauge()
runtimeMetrics.MemStats.NumGC = NewGauge()
runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
runtimeMetrics.MemStats.StackInuse = NewGauge()
runtimeMetrics.MemStats.StackSys = NewGauge()
runtimeMetrics.MemStats.Sys = NewGauge()
runtimeMetrics.MemStats.TotalAlloc = NewGauge()
runtimeMetrics.NumCgoCall = NewGauge()
runtimeMetrics.NumGoroutine = NewGauge()
runtimeMetrics.ReadMemStats = NewTimer()
r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
}
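The intended register-then-capture pattern (the same one the gorelic memory data source uses later in this change), as a minimal sketch:
```go
package main

import (
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r)
	// CaptureRuntimeMemStats blocks on its ticker, so run it in a goroutine.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)
	time.Sleep(12 * time.Second) // let a couple of samples accumulate in this toy example
}
```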

View file

@ -1,9 +0,0 @@
// +build cgo
package metrics
import "runtime"
func numCgoCall() int64 {
return runtime.NumCgoCall()
}

View file

@ -1,7 +0,0 @@
// +build !cgo
package metrics
func numCgoCall() int64 {
return 0
}

View file

@ -1,568 +0,0 @@
package metrics
import (
"container/heap"
"math"
"math/rand"
"sort"
"sync"
"time"
)
const rescaleThreshold = time.Hour
// Samples maintain a statistically-significant selection of values from
// a stream.
type Sample interface {
Clear()
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
Snapshot() Sample
StdDev() float64
Sum() int64
Update(int64)
Values() []int64
Variance() float64
}
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
// Decay Model for Streaming Systems".
//
// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
type ExpDecaySample struct {
alpha float64
count int64
mutex sync.Mutex
reservoirSize int
t0, t1 time.Time
values expDecaySampleHeap
}
// NewExpDecaySample constructs a new exponentially-decaying sample with the
// given reservoir size and alpha.
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
if UseNilMetrics {
return NilSample{}
}
s := &ExpDecaySample{
alpha: alpha,
reservoirSize: reservoirSize,
t0: time.Now(),
values: make(expDecaySampleHeap, 0, reservoirSize),
}
s.t1 = time.Now().Add(rescaleThreshold)
return s
}
// Clear clears all samples.
func (s *ExpDecaySample) Clear() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count = 0
s.t0 = time.Now()
s.t1 = s.t0.Add(rescaleThreshold)
s.values = make(expDecaySampleHeap, 0, s.reservoirSize)
}
// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *ExpDecaySample) Count() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.count
}
// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *ExpDecaySample) Max() int64 {
return SampleMax(s.Values())
}
// Mean returns the mean of the values in the sample.
func (s *ExpDecaySample) Mean() float64 {
return SampleMean(s.Values())
}
// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *ExpDecaySample) Min() int64 {
return SampleMin(s.Values())
}
// Percentile returns an arbitrary percentile of values in the sample.
func (s *ExpDecaySample) Percentile(p float64) float64 {
return SamplePercentile(s.Values(), p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
return SamplePercentiles(s.Values(), ps)
}
// Size returns the size of the sample, which is at most the reservoir size.
func (s *ExpDecaySample) Size() int {
s.mutex.Lock()
defer s.mutex.Unlock()
return len(s.values)
}
// Snapshot returns a read-only copy of the sample.
func (s *ExpDecaySample) Snapshot() Sample {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
for i, v := range s.values {
values[i] = v.v
}
return &SampleSnapshot{
count: s.count,
values: values,
}
}
// StdDev returns the standard deviation of the values in the sample.
func (s *ExpDecaySample) StdDev() float64 {
return SampleStdDev(s.Values())
}
// Sum returns the sum of the values in the sample.
func (s *ExpDecaySample) Sum() int64 {
return SampleSum(s.Values())
}
// Update samples a new value.
func (s *ExpDecaySample) Update(v int64) {
s.update(time.Now(), v)
}
// Values returns a copy of the values in the sample.
func (s *ExpDecaySample) Values() []int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
for i, v := range s.values {
values[i] = v.v
}
return values
}
// Variance returns the variance of the values in the sample.
func (s *ExpDecaySample) Variance() float64 {
return SampleVariance(s.Values())
}
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count++
if len(s.values) == s.reservoirSize {
heap.Pop(&s.values)
}
heap.Push(&s.values, expDecaySample{
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
v: v,
})
if t.After(s.t1) {
values := s.values
t0 := s.t0
s.values = make(expDecaySampleHeap, 0, s.reservoirSize)
s.t0 = t
s.t1 = s.t0.Add(rescaleThreshold)
for _, v := range values {
v.k = v.k * math.Exp(-s.alpha*float64(s.t0.Sub(t0)))
heap.Push(&s.values, v)
}
}
}
// NilSample is a no-op Sample.
type NilSample struct{}
// Clear is a no-op.
func (NilSample) Clear() {}
// Count is a no-op.
func (NilSample) Count() int64 { return 0 }
// Max is a no-op.
func (NilSample) Max() int64 { return 0 }
// Mean is a no-op.
func (NilSample) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilSample) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilSample) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilSample) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Size is a no-op.
func (NilSample) Size() int { return 0 }
// Snapshot is a no-op.
func (NilSample) Snapshot() Sample { return NilSample{} }
// StdDev is a no-op.
func (NilSample) StdDev() float64 { return 0.0 }
// Sum is a no-op.
func (NilSample) Sum() int64 { return 0 }
// Update is a no-op.
func (NilSample) Update(v int64) {}
// Values is a no-op.
func (NilSample) Values() []int64 { return []int64{} }
// Variance is a no-op.
func (NilSample) Variance() float64 { return 0.0 }
// SampleMax returns the maximum value of the slice of int64.
func SampleMax(values []int64) int64 {
if 0 == len(values) {
return 0
}
var max int64 = math.MinInt64
for _, v := range values {
if max < v {
max = v
}
}
return max
}
// SampleMean returns the mean value of the slice of int64.
func SampleMean(values []int64) float64 {
if 0 == len(values) {
return 0.0
}
return float64(SampleSum(values)) / float64(len(values))
}
// SampleMin returns the minimum value of the slice of int64.
func SampleMin(values []int64) int64 {
if 0 == len(values) {
return 0
}
var min int64 = math.MaxInt64
for _, v := range values {
if min > v {
min = v
}
}
return min
}
// SamplePercentile returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values int64Slice, p float64) float64 {
return SamplePercentiles(values, []float64{p})[0]
}
// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
// int64.
func SamplePercentiles(values int64Slice, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size > 0 {
sort.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
scores[i] = float64(values[0])
} else if pos >= float64(size) {
scores[i] = float64(values[size-1])
} else {
lower := float64(values[int(pos)-1])
upper := float64(values[int(pos)])
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
}
return scores
}
// SampleSnapshot is a read-only copy of another Sample.
type SampleSnapshot struct {
count int64
values []int64
}
// Clear panics.
func (*SampleSnapshot) Clear() {
panic("Clear called on a SampleSnapshot")
}
// Count returns the count of inputs at the time the snapshot was taken.
func (s *SampleSnapshot) Count() int64 { return s.count }
// Max returns the maximal value at the time the snapshot was taken.
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
// Mean returns the mean value at the time the snapshot was taken.
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
// Min returns the minimal value at the time the snapshot was taken.
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
// Percentile returns an arbitrary percentile of values at the time the
// snapshot was taken.
func (s *SampleSnapshot) Percentile(p float64) float64 {
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values at the time
// the snapshot was taken.
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
return SamplePercentiles(s.values, ps)
}
// Size returns the size of the sample at the time the snapshot was taken.
func (s *SampleSnapshot) Size() int { return len(s.values) }
// Snapshot returns the snapshot.
func (s *SampleSnapshot) Snapshot() Sample { return s }
// StdDev returns the standard deviation of values at the time the snapshot was
// taken.
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
// Sum returns the sum of values at the time the snapshot was taken.
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
// Update panics.
func (*SampleSnapshot) Update(int64) {
panic("Update called on a SampleSnapshot")
}
// Values returns a copy of the values in the sample.
func (s *SampleSnapshot) Values() []int64 {
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of values at the time the snapshot was taken.
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
// SampleStdDev returns the standard deviation of the slice of int64.
func SampleStdDev(values []int64) float64 {
return math.Sqrt(SampleVariance(values))
}
// SampleSum returns the sum of the slice of int64.
func SampleSum(values []int64) int64 {
var sum int64
for _, v := range values {
sum += v
}
return sum
}
// SampleVariance returns the variance of the slice of int64.
func SampleVariance(values []int64) float64 {
if 0 == len(values) {
return 0.0
}
m := SampleMean(values)
var sum float64
for _, v := range values {
d := float64(v) - m
sum += d * d
}
return sum / float64(len(values))
}
// A uniform sample using Vitter's Algorithm R.
//
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
type UniformSample struct {
count int64
mutex sync.Mutex
reservoirSize int
values []int64
}
// NewUniformSample constructs a new uniform sample with the given reservoir
// size.
func NewUniformSample(reservoirSize int) Sample {
if UseNilMetrics {
return NilSample{}
}
return &UniformSample{
reservoirSize: reservoirSize,
values: make([]int64, 0, reservoirSize),
}
}
// Clear clears all samples.
func (s *UniformSample) Clear() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count = 0
s.values = make([]int64, 0, s.reservoirSize)
}
// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *UniformSample) Count() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.count
}
// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *UniformSample) Max() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMax(s.values)
}
// Mean returns the mean of the values in the sample.
func (s *UniformSample) Mean() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMean(s.values)
}
// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *UniformSample) Min() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleMin(s.values)
}
// Percentile returns an arbitrary percentile of values in the sample.
func (s *UniformSample) Percentile(p float64) float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample.
func (s *UniformSample) Percentiles(ps []float64) []float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SamplePercentiles(s.values, ps)
}
// Size returns the size of the sample, which is at most the reservoir size.
func (s *UniformSample) Size() int {
s.mutex.Lock()
defer s.mutex.Unlock()
return len(s.values)
}
// Snapshot returns a read-only copy of the sample.
func (s *UniformSample) Snapshot() Sample {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
return &SampleSnapshot{
count: s.count,
values: values,
}
}
// StdDev returns the standard deviation of the values in the sample.
func (s *UniformSample) StdDev() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleStdDev(s.values)
}
// Sum returns the sum of the values in the sample.
func (s *UniformSample) Sum() int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleSum(s.values)
}
// Update samples a new value.
func (s *UniformSample) Update(v int64) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count++
if len(s.values) < s.reservoirSize {
s.values = append(s.values, v)
} else {
s.values[rand.Intn(s.reservoirSize)] = v
}
}
// Values returns a copy of the values in the sample.
func (s *UniformSample) Values() []int64 {
s.mutex.Lock()
defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of the values in the sample.
func (s *UniformSample) Variance() float64 {
s.mutex.Lock()
defer s.mutex.Unlock()
return SampleVariance(s.values)
}
// expDecaySample represents an individual sample in a heap.
type expDecaySample struct {
k float64
v int64
}
// expDecaySampleHeap is a min-heap of expDecaySamples.
type expDecaySampleHeap []expDecaySample
func (q expDecaySampleHeap) Len() int {
return len(q)
}
func (q expDecaySampleHeap) Less(i, j int) bool {
return q[i].k < q[j].k
}
func (q *expDecaySampleHeap) Pop() interface{} {
q_ := *q
n := len(q_)
i := q_[n-1]
q_ = q_[0 : n-1]
*q = q_
return i
}
func (q *expDecaySampleHeap) Push(x interface{}) {
q_ := *q
n := len(q_)
q_ = q_[0 : n+1]
q_[n] = x.(expDecaySample)
*q = q_
}
func (q expDecaySampleHeap) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
}
type int64Slice []int64
func (p int64Slice) Len() int { return len(p) }
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
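A brief sketch of feeding an ExpDecaySample through a histogram, with the same reservoir size and alpha (1028, 0.015) that NewTimer uses later in this change:
```go
package main

import (
	"fmt"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	// Same reservoir size and alpha as UNIX load averages (see NewTimer).
	h := metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	for i := int64(1); i <= 10000; i++ {
		h.Update(i)
	}
	fmt.Printf("mean=%.0f p95=%.0f\n", h.Mean(), h.Percentile(0.95))
}
```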

View file

@ -1,78 +0,0 @@
// +build !windows
package metrics
import (
"fmt"
"log/syslog"
"time"
)
// Output each metric in the given registry to syslog periodically using
// the given syslogger.
func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
for _ = range time.Tick(d) {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
case Gauge:
w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
case GaugeFloat64:
w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
case Healthcheck:
metric.Check()
w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
w.Info(fmt.Sprintf(
"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
name,
h.Count(),
h.Min(),
h.Max(),
h.Mean(),
h.StdDev(),
ps[0],
ps[1],
ps[2],
ps[3],
ps[4],
))
case Meter:
m := metric.Snapshot()
w.Info(fmt.Sprintf(
"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
name,
m.Count(),
m.Rate1(),
m.Rate5(),
m.Rate15(),
m.RateMean(),
))
case Timer:
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
w.Info(fmt.Sprintf(
"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
name,
t.Count(),
t.Min(),
t.Max(),
t.Mean(),
t.StdDev(),
ps[0],
ps[1],
ps[2],
ps[3],
ps[4],
t.Rate1(),
t.Rate5(),
t.Rate15(),
t.RateMean(),
))
}
})
}
}
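A minimal sketch of wiring this syslog reporter to the default registry (Unix-only, matching the build tag above):
```go
package main

import (
	"log"
	"log/syslog"
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO, "registry-metrics")
	if err != nil {
		log.Fatal(err)
	}
	// Report every metric in the default registry once a minute (blocks).
	metrics.Syslog(metrics.DefaultRegistry, 60*time.Second, w)
}
```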

View file

@ -1,299 +0,0 @@
package metrics
import (
"sync"
"time"
)
// Timers capture the duration and rate of events.
type Timer interface {
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
Snapshot() Timer
StdDev() float64
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
Variance() float64
}
// GetOrRegisterTimer returns an existing Timer or constructs and registers a
// new StandardTimer.
func GetOrRegisterTimer(name string, r Registry) Timer {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewTimer).(Timer)
}
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
func NewCustomTimer(h Histogram, m Meter) Timer {
if UseNilMetrics {
return NilTimer{}
}
return &StandardTimer{
histogram: h,
meter: m,
}
}
// NewRegisteredTimer constructs and registers a new StandardTimer.
func NewRegisteredTimer(name string, r Registry) Timer {
c := NewTimer()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// NewTimer constructs a new StandardTimer using an exponentially-decaying
// sample with the same reservoir size and alpha as UNIX load averages.
func NewTimer() Timer {
if UseNilMetrics {
return NilTimer{}
}
return &StandardTimer{
histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
meter: NewMeter(),
}
}
// NilTimer is a no-op Timer.
type NilTimer struct {
h Histogram
m Meter
}
// Count is a no-op.
func (NilTimer) Count() int64 { return 0 }
// Max is a no-op.
func (NilTimer) Max() int64 { return 0 }
// Mean is a no-op.
func (NilTimer) Mean() float64 { return 0.0 }
// Min is a no-op.
func (NilTimer) Min() int64 { return 0 }
// Percentile is a no-op.
func (NilTimer) Percentile(p float64) float64 { return 0.0 }
// Percentiles is a no-op.
func (NilTimer) Percentiles(ps []float64) []float64 {
return make([]float64, len(ps))
}
// Rate1 is a no-op.
func (NilTimer) Rate1() float64 { return 0.0 }
// Rate5 is a no-op.
func (NilTimer) Rate5() float64 { return 0.0 }
// Rate15 is a no-op.
func (NilTimer) Rate15() float64 { return 0.0 }
// RateMean is a no-op.
func (NilTimer) RateMean() float64 { return 0.0 }
// Snapshot is a no-op.
func (NilTimer) Snapshot() Timer { return NilTimer{} }
// StdDev is a no-op.
func (NilTimer) StdDev() float64 { return 0.0 }
// Time is a no-op.
func (NilTimer) Time(func()) {}
// Update is a no-op.
func (NilTimer) Update(time.Duration) {}
// UpdateSince is a no-op.
func (NilTimer) UpdateSince(time.Time) {}
// Variance is a no-op.
func (NilTimer) Variance() float64 { return 0.0 }
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
type StandardTimer struct {
histogram Histogram
meter Meter
mutex sync.Mutex
}
// Count returns the number of events recorded.
func (t *StandardTimer) Count() int64 {
return t.histogram.Count()
}
// Max returns the maximum value in the sample.
func (t *StandardTimer) Max() int64 {
return t.histogram.Max()
}
// Mean returns the mean of the values in the sample.
func (t *StandardTimer) Mean() float64 {
return t.histogram.Mean()
}
// Min returns the minimum value in the sample.
func (t *StandardTimer) Min() int64 {
return t.histogram.Min()
}
// Percentile returns an arbitrary percentile of the values in the sample.
func (t *StandardTimer) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second.
func (t *StandardTimer) Rate1() float64 {
return t.meter.Rate1()
}
// Rate5 returns the five-minute moving average rate of events per second.
func (t *StandardTimer) Rate5() float64 {
return t.meter.Rate5()
}
// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *StandardTimer) Rate15() float64 {
return t.meter.Rate15()
}
// RateMean returns the meter's mean rate of events per second.
func (t *StandardTimer) RateMean() float64 {
return t.meter.RateMean()
}
// Snapshot returns a read-only copy of the timer.
func (t *StandardTimer) Snapshot() Timer {
t.mutex.Lock()
defer t.mutex.Unlock()
return &TimerSnapshot{
histogram: t.histogram.Snapshot().(*HistogramSnapshot),
meter: t.meter.Snapshot().(*MeterSnapshot),
}
}
// StdDev returns the standard deviation of the values in the sample.
func (t *StandardTimer) StdDev() float64 {
return t.histogram.StdDev()
}
// Record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
ts := time.Now()
f()
t.Update(time.Since(ts))
}
// Record the duration of an event.
func (t *StandardTimer) Update(d time.Duration) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.histogram.Update(int64(d))
t.meter.Mark(1)
}
// Record the duration of an event that started at a time and ends now.
func (t *StandardTimer) UpdateSince(ts time.Time) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.histogram.Update(int64(time.Since(ts)))
t.meter.Mark(1)
}
// Variance returns the variance of the values in the sample.
func (t *StandardTimer) Variance() float64 {
return t.histogram.Variance()
}
// TimerSnapshot is a read-only copy of another Timer.
type TimerSnapshot struct {
histogram *HistogramSnapshot
meter *MeterSnapshot
}
// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
// Max returns the maximum value at the time the snapshot was taken.
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
// Mean returns the mean value at the time the snapshot was taken.
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
// Min returns the minimum value at the time the snapshot was taken.
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *TimerSnapshot) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// Snapshot returns the snapshot.
func (t *TimerSnapshot) Snapshot() Timer { return t }
// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Time panics.
func (*TimerSnapshot) Time(func()) {
panic("Time called on a TimerSnapshot")
}
// Update panics.
func (*TimerSnapshot) Update(time.Duration) {
panic("Update called on a TimerSnapshot")
}
// UpdateSince panics.
func (*TimerSnapshot) UpdateSince(time.Time) {
panic("UpdateSince called on a TimerSnapshot")
}
// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
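A short sketch of the Timer API defined above:
```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	// A nil registry falls back to DefaultRegistry.
	t := metrics.GetOrRegisterTimer("db.query", nil)

	// Time wraps a function call and records its duration.
	t.Time(func() { time.Sleep(10 * time.Millisecond) })

	// UpdateSince records the time elapsed since a start timestamp.
	start := time.Now()
	time.Sleep(5 * time.Millisecond)
	t.UpdateSince(start)

	fmt.Println(t.Count(), time.Duration(t.Max()))
}
```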

View file

@ -1,100 +0,0 @@
package metrics
import (
"fmt"
"io"
"sort"
"time"
)
// Write sorts and writes each metric in the given registry periodically to
// the given io.Writer.
func Write(r Registry, d time.Duration, w io.Writer) {
for _ = range time.Tick(d) {
WriteOnce(r, w)
}
}
// WriteOnce sorts and writes metrics in the given registry to the given
// io.Writer.
func WriteOnce(r Registry, w io.Writer) {
var namedMetrics namedMetricSlice
r.Each(func(name string, i interface{}) {
namedMetrics = append(namedMetrics, namedMetric{name, i})
})
sort.Sort(namedMetrics)
for _, namedMetric := range namedMetrics {
switch metric := namedMetric.m.(type) {
case Counter:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", metric.Count())
case Gauge:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %9d\n", metric.Value())
case GaugeFloat64:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %f\n", metric.Value())
case Healthcheck:
metric.Check()
fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
fmt.Fprintf(w, " error: %v\n", metric.Error())
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", h.Count())
fmt.Fprintf(w, " min: %9d\n", h.Min())
fmt.Fprintf(w, " max: %9d\n", h.Max())
fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
fmt.Fprintf(w, " median: %12.2f\n", ps[0])
fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
case Meter:
m := metric.Snapshot()
fmt.Fprintf(w, "meter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", m.Count())
fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
case Timer:
t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
fmt.Fprintf(w, "timer %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", t.Count())
fmt.Fprintf(w, " min: %9d\n", t.Min())
fmt.Fprintf(w, " max: %9d\n", t.Max())
fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
fmt.Fprintf(w, " median: %12.2f\n", ps[0])
fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
}
}
}
type namedMetric struct {
name string
m interface{}
}
// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
type namedMetricSlice []namedMetric
func (nms namedMetricSlice) Len() int { return len(nms) }
func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
func (nms namedMetricSlice) Less(i, j int) bool {
return nms[i].name < nms[j].name
}
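Using the writer above is a one-liner per sink; a hedged sketch (GetOrRegisterCounter and Inc are assumed from the counter half of the package, which is not part of this hunk):
```go
package main

import (
	"os"
	"time"

	metrics "github.com/yvasiyarov/go-metrics"
)

func main() {
	c := metrics.GetOrRegisterCounter("requests", nil) // assumed counter API
	c.Inc(1)

	// Dump a sorted, human-readable snapshot of the default registry once.
	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)

	// Or keep dumping every 30 seconds in the background.
	go metrics.Write(metrics.DefaultRegistry, 30*time.Second, os.Stderr)
	time.Sleep(time.Second)
}
```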

View file

@ -1,4 +0,0 @@
*.nut
*.swp
examples/example1
examples/example_web

View file

@ -1 +0,0 @@
language: go

View file

@ -1,24 +0,0 @@
Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,119 +0,0 @@
# GoRelic
New Relic agent for the Go runtime. It collects a number of metrics about the scheduler, garbage collector and memory allocator and
sends them to New Relic.
### Requirements
- Go 1.1 or higher
- github.com/yvasiyarov/gorelic
- github.com/yvasiyarov/newrelic_platform_go
- github.com/yvasiyarov/go-metrics
Only the first two dependencies have to be installed manually. All other dependencies will be installed automatically
by the Go toolchain.
### Installation
```bash
go get github.com/yvasiyarov/gorelic
```
and add the following code to the initialization part of your application:
```go
import (
"github.com/yvasiyarov/gorelic"
)
....
agent := gorelic.NewAgent()
agent.Verbose = true
agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
agent.Run()
```
### Middleware
If you are using the Beego, Martini, Revel or Gin framework, you can hook gorelic into your application by using one of the following middleware packages:
- https://github.com/yvasiyarov/beego_gorelic
- https://github.com/yvasiyarov/martini_gorelic
- https://github.com/yvasiyarov/gocraft_gorelic
- http://wiki.colar.net/revel_newelic
- https://github.com/jingweno/negroni-gorelic
- https://github.com/brandfolder/gin-gorelic
### Configuration
- NewrelicLicense - the only mandatory setting of this agent.
- NewrelicName - component name in the NewRelic dashboard. Default value: "Go daemon"
- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds
- Verbose - print information that is useful for debugging. Default value: false
- CollectGcStat - whether the agent collects garbage collector statistics. Default value: true
- CollectHTTPStat - whether the agent collects HTTP metrics. Default value: false
- CollectMemoryStat - whether the agent collects memory allocator statistics. Default value: true
- GCPollInterval - how often GC statistics are collected. Default value: 10 seconds. This has a performance impact; see the metrics documentation for more information.
- MemoryAllocatorPollInterval - how often memory allocator statistics are collected. Default value: 60 seconds. This has a performance impact; see the metrics documentation for more information. A configuration sketch follows this list.
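A hedged configuration sketch using only the fields listed above (values are illustrative; error handling assumes the standard `log` package):
```go
agent := gorelic.NewAgent()
agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY HERE"
agent.NewrelicName = "My Go Service"
agent.NewrelicPollInterval = 60 // seconds
agent.CollectGcStat = true
agent.CollectHTTPStat = true
agent.GCPollInterval = 10 // seconds; lower values lock the mheap more often
agent.Verbose = false
if err := agent.Run(); err != nil {
	log.Fatal(err)
}
```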
## Metrics reported by plugin
This agent uses functions exposed by the runtime and runtime/debug packages to collect the most important information about the Go runtime.
### General metrics
- Runtime/General/NOGoroutines - number of running goroutines, as reported by NumGoroutine() from the runtime package
- Runtime/General/NOCgoCalls - number of cgo calls made, as reported by NumCgoCall() from the runtime package
### Garbage collector metrics
- Runtime/GC/NumberOfGCCalls - Number of GC calls, as reported by ReadGCStats() from runtime/debug
- Runtime/GC/PauseTotalTime - Total pause time during GC calls, as reported by ReadGCStats() from runtime/debug (in nanoseconds)
- Runtime/GC/GCTime/Max - max GC time
- Runtime/GC/GCTime/Min - min GC time
- Runtime/GC/GCTime/Mean - GC mean time
- Runtime/GC/GCTime/Percentile95 - 95% percentile of GC time
All of these metrics are measured in nanoseconds. The last four of them can be inaccurate if GC runs more often than once per GCPollInterval.
If GC is called more often in your workload, you can consider decreasing the value of GCPollInterval.
But be careful: ReadGCStats() locks the mheap, so it is not a good idea to set GCPollInterval to very low values.
### Memory allocator
- Component/Runtime/Memory/SysMem/Total - total number of bytes/minute allocated from the OS.
- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from the OS for stacks.
- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from the OS for internal MSpan structs.
- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from the OS for internal MCache structs.
- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from the OS for the heap.
- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from the OS for internal BuckHash structs.
- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute
- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute
- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute
- Component/Runtime/Memory/InUse/Total - total amount of memory in use
- Component/Runtime/Memory/InUse/Heap - amount of memory in use for heap
- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures
- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures
- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks
### Process metrics
- Component/Runtime/System/Threads - number of OS threads used
- Runtime/System/FDSize - number of file descriptors used by the process
- Runtime/System/Memory/VmPeakSize - VM max size
- Runtime/System/Memory/VmCurrent - VM current size
- Runtime/System/Memory/RssPeak - max size of resident memory set
- Runtime/System/Memory/RssCurrent - current size of resident memory set
All of these metrics are collected once per MemoryAllocatorPollInterval. In order to collect them, the agent uses the ReadMemStats() routine.
This routine calls stoptheworld() internally, which blocks everything, so please consider this when you change the MemoryAllocatorPollInterval value.
### HTTP metrics
- throughput (requests per second), calculated over the last minute
- mean throughput (requests per second)
- mean response time
- min response time
- max response time
- 75%, 90%, 95% percentiles for response time
In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc:
```go
http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler))
```
## TODO
- Collect per-size allocation statistics
- Collect user-defined metrics

View file

@ -1,137 +0,0 @@
package gorelic
import (
"errors"
"fmt"
metrics "github.com/yvasiyarov/go-metrics"
"github.com/yvasiyarov/newrelic_platform_go"
"log"
"net/http"
)
const (
// DefaultNewRelicPollInterval - how often we will report metrics to NewRelic.
// The recommended value is 60 seconds
DefaultNewRelicPollInterval = 60
// DefaultGcPollIntervalInSeconds - how often we will collect garbage collector run statistics
// Default value is every 10 seconds
// During GC stat polling the mheap is locked, so be careful when changing this value
DefaultGcPollIntervalInSeconds = 10
// DefaultMemoryAllocatorPollIntervalInSeconds - how often we will collect memory allocator statistics.
// Default value is every 60 seconds
// During this process stoptheworld() is called, so be careful when changing this value
DefaultMemoryAllocatorPollIntervalInSeconds = 60
//DefaultAgentGuid is the plugin ID in NewRelic.
//You should not change it unless you want to create your own plugin.
DefaultAgentGuid = "com.github.yvasiyarov.GoRelic"
//CurrentAgentVersion is the plugin version
CurrentAgentVersion = "0.0.6"
//DefaultAgentName is the default component name shown in the NewRelic GUI. You can change it.
DefaultAgentName = "Go daemon"
)
//Agent is the NewRelic agent implementation.
//Agent starts a separate goroutine which reports data to NewRelic
type Agent struct {
NewrelicName string
NewrelicLicense string
NewrelicPollInterval int
Verbose bool
CollectGcStat bool
CollectMemoryStat bool
CollectHTTPStat bool
GCPollInterval int
MemoryAllocatorPollInterval int
AgentGUID string
AgentVersion string
plugin *newrelic_platform_go.NewrelicPlugin
HTTPTimer metrics.Timer
}
//NewAgent builds a new Agent object.
func NewAgent() *Agent {
agent := &Agent{
NewrelicName: DefaultAgentName,
NewrelicPollInterval: DefaultNewRelicPollInterval,
Verbose: false,
CollectGcStat: true,
CollectMemoryStat: true,
GCPollInterval: DefaultGcPollIntervalInSeconds,
MemoryAllocatorPollInterval: DefaultMemoryAllocatorPollIntervalInSeconds,
AgentGUID: DefaultAgentGuid,
AgentVersion: CurrentAgentVersion,
}
return agent
}
//WrapHTTPHandlerFunc instruments HTTP handler functions to collect HTTP metrics
func (agent *Agent) WrapHTTPHandlerFunc(h tHTTPHandlerFunc) tHTTPHandlerFunc {
agent.initTimer()
return func(w http.ResponseWriter, req *http.Request) {
proxy := newHTTPHandlerFunc(h)
proxy.timer = agent.HTTPTimer
proxy.ServeHTTP(w, req)
}
}
//WrapHTTPHandler instruments an HTTP handler object to collect HTTP metrics
func (agent *Agent) WrapHTTPHandler(h http.Handler) http.Handler {
agent.initTimer()
proxy := newHTTPHandler(h)
proxy.timer = agent.HTTPTimer
return proxy
}
//Run initializes the Agent instance and starts the harvest goroutine
func (agent *Agent) Run() error {
if agent.NewrelicLicense == "" {
return errors.New("please, pass a valid newrelic license key")
}
agent.plugin = newrelic_platform_go.NewNewrelicPlugin(agent.AgentVersion, agent.NewrelicLicense, agent.NewrelicPollInterval)
component := newrelic_platform_go.NewPluginComponent(agent.NewrelicName, agent.AgentGUID)
agent.plugin.AddComponent(component)
addRuntimeMericsToComponent(component)
if agent.CollectGcStat {
addGCMericsToComponent(component, agent.GCPollInterval)
agent.debug(fmt.Sprintf("Init GC metrics collection. Poll interval %d seconds.", agent.GCPollInterval))
}
if agent.CollectMemoryStat {
addMemoryMericsToComponent(component, agent.MemoryAllocatorPollInterval)
agent.debug(fmt.Sprintf("Init memory allocator metrics collection. Poll interval %d seconds.", agent.MemoryAllocatorPollInterval))
}
if agent.CollectHTTPStat {
agent.initTimer()
addHTTPMericsToComponent(component, agent.HTTPTimer)
agent.debug(fmt.Sprintf("Init HTTP metrics collection."))
}
agent.plugin.Verbose = agent.Verbose
go agent.plugin.Run()
return nil
}
//Initialize global metrics.Timer object, used to collect HTTP metrics
func (agent *Agent) initTimer() {
if agent.HTTPTimer == nil {
agent.HTTPTimer = metrics.NewTimer()
}
agent.CollectHTTPStat = true
}
//Print debug messages
func (agent *Agent) debug(msg string) {
if agent.Verbose {
log.Println(msg)
}
}

View file

@ -1,2 +0,0 @@
// Package gorelic is a New Relic agent implementation for the Go runtime. It collects a number of metrics about the Go scheduler, garbage collector and memory allocator and sends them to NewRelic.
package gorelic

View file

@ -1,65 +0,0 @@
package gorelic
import (
metrics "github.com/yvasiyarov/go-metrics"
"github.com/yvasiyarov/newrelic_platform_go"
"time"
)
func newGCMetricaDataSource(pollInterval int) goMetricaDataSource {
r := metrics.NewRegistry()
metrics.RegisterDebugGCStats(r)
go metrics.CaptureDebugGCStats(r, time.Duration(pollInterval)*time.Second)
return goMetricaDataSource{r}
}
func addGCMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) {
metrics := []*baseGoMetrica{
&baseGoMetrica{
name: "NumberOfGCCalls",
units: "calls",
dataSourceKey: "debug.GCStats.NumGC",
},
&baseGoMetrica{
name: "PauseTotalTime",
units: "nanoseconds",
dataSourceKey: "debug.GCStats.PauseTotal",
},
}
ds := newGCMetricaDataSource(pollInterval)
for _, m := range metrics {
m.basePath = "Runtime/GC/"
m.dataSource = ds
component.AddMetrica(&gaugeMetrica{m})
}
histogramMetrics := []*histogramMetrica{
&histogramMetrica{
statFunction: histogramMax,
baseGoMetrica: &baseGoMetrica{name: "Max"},
},
&histogramMetrica{
statFunction: histogramMin,
baseGoMetrica: &baseGoMetrica{name: "Min"},
},
&histogramMetrica{
statFunction: histogramMean,
baseGoMetrica: &baseGoMetrica{name: "Mean"},
},
&histogramMetrica{
statFunction: histogramPercentile,
percentileValue: 0.95,
baseGoMetrica: &baseGoMetrica{name: "Percentile95"},
},
}
for _, m := range histogramMetrics {
m.baseGoMetrica.units = "nanoseconds"
m.baseGoMetrica.dataSourceKey = "debug.GCStats.Pause"
m.baseGoMetrica.basePath = "Runtime/GC/GCTime/"
m.baseGoMetrica.dataSource = ds
component.AddMetrica(m)
}
}

View file

@ -1,105 +0,0 @@
package gorelic
import (
"fmt"
metrics "github.com/yvasiyarov/go-metrics"
)
const (
histogramMin = iota
histogramMax
histogramMean
histogramPercentile
histogramStdDev
histogramVariance
noHistogramFunctions
)
type goMetricaDataSource struct {
metrics.Registry
}
func (ds goMetricaDataSource) GetGaugeValue(key string) (float64, error) {
if valueContainer := ds.Get(key); valueContainer == nil {
return 0, fmt.Errorf("metrica with name %s is not registered\n", key)
} else if gauge, ok := valueContainer.(metrics.Gauge); ok {
return float64(gauge.Value()), nil
} else {
return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer)
}
}
func (ds goMetricaDataSource) GetHistogramValue(key string, statFunction int, percentile float64) (float64, error) {
if valueContainer := ds.Get(key); valueContainer == nil {
return 0, fmt.Errorf("metrica with name %s is not registered\n", key)
} else if histogram, ok := valueContainer.(metrics.Histogram); ok {
switch statFunction {
default:
return 0, fmt.Errorf("unsupported stat function for histogram: %s\n", statFunction)
case histogramMax:
return float64(histogram.Max()), nil
case histogramMin:
return float64(histogram.Min()), nil
case histogramMean:
return float64(histogram.Mean()), nil
case histogramStdDev:
return float64(histogram.StdDev()), nil
case histogramVariance:
return float64(histogram.Variance()), nil
case histogramPercentile:
return float64(histogram.Percentile(percentile)), nil
}
} else {
return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer)
}
}
type baseGoMetrica struct {
dataSource goMetricaDataSource
basePath string
name string
units string
dataSourceKey string
}
func (metrica *baseGoMetrica) GetName() string {
return metrica.basePath + metrica.name
}
func (metrica *baseGoMetrica) GetUnits() string {
return metrica.units
}
type gaugeMetrica struct {
*baseGoMetrica
}
func (metrica *gaugeMetrica) GetValue() (float64, error) {
return metrica.dataSource.GetGaugeValue(metrica.dataSourceKey)
}
type gaugeIncMetrica struct {
*baseGoMetrica
previousValue float64
}
func (metrica *gaugeIncMetrica) GetValue() (float64, error) {
var value float64
var currentValue float64
var err error
if currentValue, err = metrica.dataSource.GetGaugeValue(metrica.dataSourceKey); err == nil {
value = currentValue - metrica.previousValue
metrica.previousValue = currentValue
}
return value, err
}
type histogramMetrica struct {
*baseGoMetrica
statFunction int
percentileValue float64
}
func (metrica *histogramMetrica) GetValue() (float64, error) {
return metrica.dataSource.GetHistogramValue(metrica.dataSourceKey, metrica.statFunction, metrica.percentileValue)
}

View file

@ -1,194 +0,0 @@
package gorelic
import (
metrics "github.com/yvasiyarov/go-metrics"
"github.com/yvasiyarov/newrelic_platform_go"
"net/http"
"time"
)
type tHTTPHandlerFunc func(http.ResponseWriter, *http.Request)
type tHTTPHandler struct {
originalHandler http.Handler
originalHandlerFunc tHTTPHandlerFunc
isFunc bool
timer metrics.Timer
}
var httpTimer metrics.Timer
func newHTTPHandlerFunc(h tHTTPHandlerFunc) *tHTTPHandler {
return &tHTTPHandler{
isFunc: true,
originalHandlerFunc: h,
}
}
func newHTTPHandler(h http.Handler) *tHTTPHandler {
return &tHTTPHandler{
isFunc: false,
originalHandler: h,
}
}
func (handler *tHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
startTime := time.Now()
defer handler.timer.UpdateSince(startTime)
if handler.isFunc {
handler.originalHandlerFunc(w, req)
} else {
handler.originalHandler.ServeHTTP(w, req)
}
}
type baseTimerMetrica struct {
dataSource metrics.Timer
name string
units string
}
func (metrica *baseTimerMetrica) GetName() string {
return metrica.name
}
func (metrica *baseTimerMetrica) GetUnits() string {
return metrica.units
}
type timerRate1Metrica struct {
*baseTimerMetrica
}
func (metrica *timerRate1Metrica) GetValue() (float64, error) {
return metrica.dataSource.Rate1(), nil
}
type timerRateMeanMetrica struct {
*baseTimerMetrica
}
func (metrica *timerRateMeanMetrica) GetValue() (float64, error) {
return metrica.dataSource.RateMean(), nil
}
type timerMeanMetrica struct {
*baseTimerMetrica
}
func (metrica *timerMeanMetrica) GetValue() (float64, error) {
return metrica.dataSource.Mean() / float64(time.Millisecond), nil
}
type timerMinMetrica struct {
*baseTimerMetrica
}
func (metrica *timerMinMetrica) GetValue() (float64, error) {
return float64(metrica.dataSource.Min()) / float64(time.Millisecond), nil
}
type timerMaxMetrica struct {
*baseTimerMetrica
}
func (metrica *timerMaxMetrica) GetValue() (float64, error) {
return float64(metrica.dataSource.Max()) / float64(time.Millisecond), nil
}
type timerPercentile75Metrica struct {
*baseTimerMetrica
}
func (metrica *timerPercentile75Metrica) GetValue() (float64, error) {
return metrica.dataSource.Percentile(0.75) / float64(time.Millisecond), nil
}
type timerPercentile90Metrica struct {
*baseTimerMetrica
}
func (metrica *timerPercentile90Metrica) GetValue() (float64, error) {
return metrica.dataSource.Percentile(0.90) / float64(time.Millisecond), nil
}
type timerPercentile95Metrica struct {
*baseTimerMetrica
}
func (metrica *timerPercentile95Metrica) GetValue() (float64, error) {
return metrica.dataSource.Percentile(0.95) / float64(time.Millisecond), nil
}
func addHTTPMericsToComponent(component newrelic_platform_go.IComponent, timer metrics.Timer) {
rate1 := &timerRate1Metrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/throughput/1minute",
units: "rps",
dataSource: timer,
},
}
component.AddMetrica(rate1)
rateMean := &timerRateMeanMetrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/throughput/rateMean",
units: "rps",
dataSource: timer,
},
}
component.AddMetrica(rateMean)
responseTimeMean := &timerMeanMetrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/responseTime/mean",
units: "ms",
dataSource: timer,
},
}
component.AddMetrica(responseTimeMean)
responseTimeMax := &timerMaxMetrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/responseTime/max",
units: "ms",
dataSource: timer,
},
}
component.AddMetrica(responseTimeMax)
responseTimeMin := &timerMinMetrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/responseTime/min",
units: "ms",
dataSource: timer,
},
}
component.AddMetrica(responseTimeMin)
responseTimePercentile75 := &timerPercentile75Metrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/responseTime/percentile75",
units: "ms",
dataSource: timer,
},
}
component.AddMetrica(responseTimePercentile75)
responseTimePercentile90 := &timerPercentile90Metrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/responseTime/percentile90",
units: "ms",
dataSource: timer,
},
}
component.AddMetrica(responseTimePercentile90)
responseTimePercentile95 := &timerPercentile95Metrica{
baseTimerMetrica: &baseTimerMetrica{
name: "http/responseTime/percentile95",
units: "ms",
dataSource: timer,
},
}
component.AddMetrica(responseTimePercentile95)
}


@ -1,110 +0,0 @@
package gorelic
import (
metrics "github.com/yvasiyarov/go-metrics"
"github.com/yvasiyarov/newrelic_platform_go"
"time"
)
func newMemoryMetricaDataSource(pollInterval int) goMetricaDataSource {
r := metrics.NewRegistry()
metrics.RegisterRuntimeMemStats(r)
metrics.CaptureRuntimeMemStatsOnce(r)
go metrics.CaptureRuntimeMemStats(r, time.Duration(pollInterval)*time.Second)
return goMetricaDataSource{r}
}
func addMemoryMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) {
gaugeMetrics := []*baseGoMetrica{
//Memory in use metrics
&baseGoMetrica{
name: "InUse/Total",
units: "bytes",
dataSourceKey: "runtime.MemStats.Alloc",
},
&baseGoMetrica{
name: "InUse/Heap",
units: "bytes",
dataSourceKey: "runtime.MemStats.HeapAlloc",
},
&baseGoMetrica{
name: "InUse/Stack",
units: "bytes",
dataSourceKey: "runtime.MemStats.StackInuse",
},
&baseGoMetrica{
name: "InUse/MSpanInuse",
units: "bytes",
dataSourceKey: "runtime.MemStats.MSpanInuse",
},
&baseGoMetrica{
name: "InUse/MCacheInuse",
units: "bytes",
dataSourceKey: "runtime.MemStats.MCacheInuse",
},
}
ds := newMemoryMetricaDataSource(pollInterval)
for _, m := range gaugeMetrics {
m.basePath = "Runtime/Memory/"
m.dataSource = ds
component.AddMetrica(&gaugeMetrica{m})
}
gaugeIncMetrics := []*baseGoMetrica{
//NO operations graph
&baseGoMetrica{
name: "Operations/NoPointerLookups",
units: "lookups",
dataSourceKey: "runtime.MemStats.Lookups",
},
&baseGoMetrica{
name: "Operations/NoMallocs",
units: "mallocs",
dataSourceKey: "runtime.MemStats.Mallocs",
},
&baseGoMetrica{
name: "Operations/NoFrees",
units: "frees",
dataSourceKey: "runtime.MemStats.Frees",
},
// System memory allocations
&baseGoMetrica{
name: "SysMem/Total",
units: "bytes",
dataSourceKey: "runtime.MemStats.Sys",
},
&baseGoMetrica{
name: "SysMem/Heap",
units: "bytes",
dataSourceKey: "runtime.MemStats.HeapSys",
},
&baseGoMetrica{
name: "SysMem/Stack",
units: "bytes",
dataSourceKey: "runtime.MemStats.StackSys",
},
&baseGoMetrica{
name: "SysMem/MSpan",
units: "bytes",
dataSourceKey: "runtime.MemStats.MSpanSys",
},
&baseGoMetrica{
name: "SysMem/MCache",
units: "bytes",
dataSourceKey: "runtime.MemStats.MCacheSys",
},
&baseGoMetrica{
name: "SysMem/BuckHash",
units: "bytes",
dataSourceKey: "runtime.MemStats.BuckHashSys",
},
}
for _, m := range gaugeIncMetrics {
m.basePath = "Runtime/Memory/"
m.dataSource = ds
component.AddMetrica(&gaugeIncMetrica{baseGoMetrica: m})
}
}


@ -1,15 +0,0 @@
{
"Version": "0.0.6",
"Vendor": "yvasiyarov",
"Authors": [
{
"FullName": "Yuriy Vasiyarov",
"Email": "varyous@gmail.com"
}
],
"ExtraFiles": [
"README.md",
"LICENSE"
],
"Homepage": "https://github.com/yvasiyarov/gorelic"
}


@ -1,196 +0,0 @@
package gorelic
import (
"fmt"
"github.com/yvasiyarov/newrelic_platform_go"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"time"
)
const linuxSystemQueryInterval = 60
// Number of goroutines metrica
type noGoroutinesMetrica struct{}
func (metrica *noGoroutinesMetrica) GetName() string {
return "Runtime/General/NOGoroutines"
}
func (metrica *noGoroutinesMetrica) GetUnits() string {
return "goroutines"
}
func (metrica *noGoroutinesMetrica) GetValue() (float64, error) {
return float64(runtime.NumGoroutine()), nil
}
// Number of CGO calls metrica
type noCgoCallsMetrica struct {
lastValue int64
}
func (metrica *noCgoCallsMetrica) GetName() string {
return "Runtime/General/NOCgoCalls"
}
func (metrica *noCgoCallsMetrica) GetUnits() string {
return "calls"
}
func (metrica *noCgoCallsMetrica) GetValue() (float64, error) {
currentValue := runtime.NumCgoCall()
value := float64(currentValue - metrica.lastValue)
metrica.lastValue = currentValue
return value, nil
}
//OS specific metrics data source interface
type iSystemMetricaDataSource interface {
GetValue(key string) (float64, error)
}
// iSystemMetricaDataSource factory
func newSystemMetricaDataSource() iSystemMetricaDataSource {
var ds iSystemMetricaDataSource
switch runtime.GOOS {
default:
ds = &systemMetricaDataSource{}
case "linux":
ds = &linuxSystemMetricaDataSource{
systemData: make(map[string]string),
}
}
return ds
}
//Default implementation of iSystemMetricaDataSource. Just return an error
type systemMetricaDataSource struct{}
func (ds *systemMetricaDataSource) GetValue(key string) (float64, error) {
return 0, fmt.Errorf("this metrica was not implemented yet for %s", runtime.GOOS)
}
// Linux OS implementation of ISystemMetricaDataSource
type linuxSystemMetricaDataSource struct {
lastUpdate time.Time
systemData map[string]string
}
func (ds *linuxSystemMetricaDataSource) GetValue(key string) (float64, error) {
if err := ds.checkAndUpdateData(); err != nil {
return 0, err
} else if val, ok := ds.systemData[key]; !ok {
return 0, fmt.Errorf("system data with key %s was not found", key)
} else if key == "VmSize" || key == "VmPeak" || key == "VmHWM" || key == "VmRSS" {
valueParts := strings.Split(val, " ")
if len(valueParts) != 2 {
return 0, fmt.Errorf("invalid format for value %s", key)
}
valConverted, err := strconv.ParseFloat(valueParts[0], 64)
if err != nil {
return 0, err
}
switch valueParts[1] {
case "kB":
valConverted *= 1 << 10
case "mB":
valConverted *= 1 << 20
case "gB":
valConverted *= 1 << 30
}
return valConverted, nil
} else if valConverted, err := strconv.ParseFloat(val, 64); err != nil {
return 0, err
} else {
return valConverted, nil
}
}
func (ds *linuxSystemMetricaDataSource) checkAndUpdateData() error {
startTime := time.Now()
if startTime.Sub(ds.lastUpdate) > time.Second*linuxSystemQueryInterval {
path := fmt.Sprintf("/proc/%d/status", os.Getpid())
rawStats, err := ioutil.ReadFile(path)
if err != nil {
return err
}
lines := strings.Split(string(rawStats), "\n")
for _, line := range lines {
parts := strings.Split(line, ":")
if len(parts) == 2 {
k := strings.TrimSpace(parts[0])
v := strings.TrimSpace(parts[1])
ds.systemData[k] = v
}
}
ds.lastUpdate = startTime
}
return nil
}
// OS specific metrica
type systemMetrica struct {
sourceKey string
newrelicName string
units string
dataSource iSystemMetricaDataSource
}
func (metrica *systemMetrica) GetName() string {
return metrica.newrelicName
}
func (metrica *systemMetrica) GetUnits() string {
return metrica.units
}
func (metrica *systemMetrica) GetValue() (float64, error) {
return metrica.dataSource.GetValue(metrica.sourceKey)
}
func addRuntimeMericsToComponent(component newrelic_platform_go.IComponent) {
component.AddMetrica(&noGoroutinesMetrica{})
component.AddMetrica(&noCgoCallsMetrica{})
ds := newSystemMetricaDataSource()
metrics := []*systemMetrica{
&systemMetrica{
sourceKey: "Threads",
units: "Threads",
newrelicName: "Runtime/System/Threads",
},
&systemMetrica{
sourceKey: "FDSize",
units: "fd",
newrelicName: "Runtime/System/FDSize",
},
// Peak virtual memory size
&systemMetrica{
sourceKey: "VmPeak",
units: "bytes",
newrelicName: "Runtime/System/Memory/VmPeakSize",
},
//Virtual memory size
&systemMetrica{
sourceKey: "VmSize",
units: "bytes",
newrelicName: "Runtime/System/Memory/VmCurrent",
},
//Peak resident set size
&systemMetrica{
sourceKey: "VmHWM",
units: "bytes",
newrelicName: "Runtime/System/Memory/RssPeak",
},
//Resident set size
&systemMetrica{
sourceKey: "VmRSS",
units: "bytes",
newrelicName: "Runtime/System/Memory/RssCurrent",
},
}
for _, m := range metrics {
m.dataSource = ds
component.AddMetrica(m)
}
}


@ -1 +0,0 @@
language: go


@ -1,24 +0,0 @@
Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,11 +0,0 @@
New Relic Platform Agent SDK for Go(golang)
====================
[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go)
This package provides a very simple interface to the New Relic Platform: http://newrelic.com/platform
For an example of usage, see examples/wave_plugin.go
For a real-world example, you can have a look at:
https://github.com/yvasiyarov/newrelic_sphinx
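Since the examples directory is not part of this vendored copy, here is a minimal, untested sketch of how the SDK could be wired up, based only on the constructors and interfaces shown in the sources below (NewNewrelicPlugin, NewPluginComponent, AddMetrica, AddComponent, Run). The metrica implementation, component name, GUID, and license key are placeholders, not values from this repository.

package main

import (
	"runtime"

	"github.com/yvasiyarov/newrelic_platform_go"
)

// goroutinesMetrica is a placeholder IMetrica implementation that reports
// the current number of goroutines.
type goroutinesMetrica struct{}

func (m *goroutinesMetrica) GetName() string  { return "Runtime/NOGoroutines" }
func (m *goroutinesMetrica) GetUnits() string { return "goroutines" }
func (m *goroutinesMetrica) GetValue() (float64, error) {
	return float64(runtime.NumGoroutine()), nil
}

func main() {
	// Arguments: agent version, license key, poll interval in seconds (all example values).
	plugin := newrelic_platform_go.NewNewrelicPlugin("0.0.1", "YOUR_NEWRELIC_LICENSE_KEY", 60)
	component := newrelic_platform_go.NewPluginComponent("example-app", "com.example.newrelic-plugin")
	component.AddMetrica(&goroutinesMetrica{})
	plugin.AddComponent(component)
	plugin.Run() // blocks: harvests and sends metrics on every poll interval
}

As the Run implementation further below shows, the call blocks on a ticker and harvests on the configured poll interval, so callers that need to keep working would typically start it in its own goroutine.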


@ -1,27 +0,0 @@
package newrelic_platform_go
import (
"log"
"os"
)
type Agent struct {
Host string `json:"host"`
Version string `json:"version"`
Pid int `json:"pid"`
}
func NewAgent(Version string) *Agent {
agent := &Agent{
Version: Version,
}
return agent
}
func (agent *Agent) CollectEnvironmentInfo() {
var err error
agent.Pid = os.Getpid()
if agent.Host, err = os.Hostname(); err != nil {
log.Fatalf("Can not get hostname: %#v \n", err)
}
}


@ -1,71 +0,0 @@
package newrelic_platform_go
import (
"log"
"math"
)
type ComponentData interface{}
type IComponent interface {
Harvest(plugin INewrelicPlugin) ComponentData
SetDuration(duration int)
AddMetrica(model IMetrica)
ClearSentData()
}
type PluginComponent struct {
Name string `json:"name"`
GUID string `json:"guid"`
Duration int `json:"duration"`
Metrics map[string]MetricaValue `json:"metrics"`
MetricaModels []IMetrica `json:"-"`
}
func NewPluginComponent(name string, guid string) *PluginComponent {
c := &PluginComponent{
Name: name,
GUID: guid,
}
return c
}
func (component *PluginComponent) AddMetrica(model IMetrica) {
component.MetricaModels = append(component.MetricaModels, model)
}
func (component *PluginComponent) ClearSentData() {
component.Metrics = nil
}
func (component *PluginComponent) SetDuration(duration int) {
component.Duration = duration
}
func (component *PluginComponent) Harvest(plugin INewrelicPlugin) ComponentData {
component.Metrics = make(map[string]MetricaValue, len(component.MetricaModels))
for i := 0; i < len(component.MetricaModels); i++ {
model := component.MetricaModels[i]
metricaKey := plugin.GetMetricaKey(model)
if newValue, err := model.GetValue(); err == nil {
if math.IsInf(newValue, 0) || math.IsNaN(newValue) {
newValue = 0
}
if existMetric, ok := component.Metrics[metricaKey]; ok {
if floatExistVal, ok := existMetric.(float64); ok {
component.Metrics[metricaKey] = NewAggregatedMetricaValue(floatExistVal, newValue)
} else if aggregatedValue, ok := existMetric.(*AggregatedMetricaValue); ok {
aggregatedValue.Aggregate(newValue)
} else {
panic("Invalid type in metrica value")
}
} else {
component.Metrics[metricaKey] = newValue
}
} else {
log.Printf("Can not get metrica: %v, got error:%#v", model.GetName(), err)
}
}
return component
}


@ -1,2 +0,0 @@
// Package newrelic_platform_go is the New Relic Platform Agent SDK for the Go language.
package newrelic_platform_go


@ -1,42 +0,0 @@
package newrelic_platform_go
import (
"math"
)
type IMetrica interface {
GetValue() (float64, error)
GetName() string
GetUnits() string
}
type MetricaValue interface{}
type SimpleMetricaValue float64
type AggregatedMetricaValue struct {
Min float64 `json:"min"`
Max float64 `json:"max"`
Total float64 `json:"total"`
Count int `json:"count"`
SumOfSquares float64 `json:"sum_of_squares"`
}
func NewAggregatedMetricaValue(existValue float64, newValue float64) *AggregatedMetricaValue {
v := &AggregatedMetricaValue{
Min: math.Min(newValue, existValue),
Max: math.Max(newValue, existValue),
Total: newValue + existValue,
Count: 2,
SumOfSquares: newValue*newValue + existValue*existValue,
}
return v
}
func (aggregatedValue *AggregatedMetricaValue) Aggregate(newValue float64) {
aggregatedValue.Min = math.Min(newValue, aggregatedValue.Min)
aggregatedValue.Max = math.Max(newValue, aggregatedValue.Max)
aggregatedValue.Total += newValue
aggregatedValue.Count++
aggregatedValue.SumOfSquares += newValue * newValue
}


@ -1,15 +0,0 @@
{
"Version": "0.0.1",
"Vendor": "yvasiyarov",
"Authors": [
{
"FullName": "Yuriy Vasiyarov",
"Email": "varyous@gmail.com"
}
],
"ExtraFiles": [
"README.md",
"LICENSE"
],
"Homepage": "https://github.com/yvasiyarov/newrelic_platform_go"
}


@ -1,194 +0,0 @@
package newrelic_platform_go
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"strings"
"time"
)
const (
NEWRELIC_API_URL = "https://platform-api.newrelic.com/platform/v1/metrics"
)
type INewrelicPlugin interface {
GetMetricaKey(metrica IMetrica) string
Harvest() error
Run()
AddComponent(component IComponent)
}
type NewrelicPlugin struct {
Agent *Agent `json:"agent"`
Components []ComponentData `json:"components"`
ComponentModels []IComponent `json:"-"`
LastPollTime time.Time `json:"-"`
Verbose bool `json:"-"`
LicenseKey string `json:"-"`
PollIntervalInSecond int `json:"-"`
}
func NewNewrelicPlugin(version string, licenseKey string, pollInterval int) *NewrelicPlugin {
plugin := &NewrelicPlugin{
LicenseKey: licenseKey,
PollIntervalInSecond: pollInterval,
}
plugin.Agent = NewAgent(version)
plugin.Agent.CollectEnvironmentInfo()
plugin.ComponentModels = []IComponent{}
return plugin
}
func (plugin *NewrelicPlugin) Harvest() error {
startTime := time.Now()
var duration int
if plugin.LastPollTime.IsZero() {
duration = plugin.PollIntervalInSecond
} else {
duration = int(startTime.Sub(plugin.LastPollTime).Seconds())
}
plugin.Components = make([]ComponentData, 0, len(plugin.ComponentModels))
for i := 0; i < len(plugin.ComponentModels); i++ {
plugin.ComponentModels[i].SetDuration(duration)
plugin.Components = append(plugin.Components, plugin.ComponentModels[i].Harvest(plugin))
}
if httpCode, err := plugin.SendMetricas(); err != nil {
log.Printf("Can not send metricas to newrelic: %#v\n", err)
return err
} else {
if plugin.Verbose {
log.Printf("Got HTTP response code:%d", httpCode)
}
if err, isFatal := plugin.CheckResponse(httpCode); isFatal {
log.Printf("Got fatal error:%v\n", err)
return err
} else {
if err != nil {
log.Printf("WARNING: %v", err)
}
return err
}
}
return nil
}
func (plugin *NewrelicPlugin) GetMetricaKey(metrica IMetrica) string {
var keyBuffer bytes.Buffer
keyBuffer.WriteString("Component/")
keyBuffer.WriteString(metrica.GetName())
keyBuffer.WriteString("[")
keyBuffer.WriteString(metrica.GetUnits())
keyBuffer.WriteString("]")
return keyBuffer.String()
}
func (plugin *NewrelicPlugin) SendMetricas() (int, error) {
client := &http.Client{}
var metricasJson []byte
var encodingError error
if plugin.Verbose {
metricasJson, encodingError = json.MarshalIndent(plugin, "", " ")
} else {
metricasJson, encodingError = json.Marshal(plugin)
}
if encodingError != nil {
return 0, encodingError
}
jsonAsString := string(metricasJson)
if plugin.Verbose {
log.Printf("Send data:%s \n", jsonAsString)
}
if httpRequest, err := http.NewRequest("POST", NEWRELIC_API_URL, strings.NewReader(jsonAsString)); err != nil {
return 0, err
} else {
httpRequest.Header.Set("X-License-Key", plugin.LicenseKey)
httpRequest.Header.Set("Content-Type", "application/json")
httpRequest.Header.Set("Accept", "application/json")
if httpResponse, err := client.Do(httpRequest); err != nil {
return 0, err
} else {
defer httpResponse.Body.Close()
return httpResponse.StatusCode, nil
}
}
// we will never get there
return 0, nil
}
func (plugin *NewrelicPlugin) ClearSentData() {
for _, component := range plugin.ComponentModels {
component.ClearSentData()
}
plugin.Components = nil
plugin.LastPollTime = time.Now()
}
func (plugin *NewrelicPlugin) CheckResponse(httpResponseCode int) (error, bool) {
isFatal := false
var err error
switch httpResponseCode {
case http.StatusOK:
{
plugin.ClearSentData()
}
case http.StatusForbidden:
{
err = fmt.Errorf("Authentication error (no license key header, or invalid license key).\n")
isFatal = true
}
case http.StatusBadRequest:
{
err = fmt.Errorf("The request or headers are in the wrong format or the URL is incorrect.\n")
isFatal = true
}
case http.StatusNotFound:
{
err = fmt.Errorf("Invalid URL\n")
isFatal = true
}
case http.StatusRequestEntityTooLarge:
{
err = fmt.Errorf("Too many metrics were sent in one request, or too many components (instances) were specified in one request, or other single-request limits were reached.\n")
//discard metrics
plugin.ClearSentData()
}
case http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout:
{
err = fmt.Errorf("Got %v response code.Metricas will be aggregated", httpResponseCode)
}
}
return err, isFatal
}
func (plugin *NewrelicPlugin) Run() {
plugin.Harvest()
tickerChannel := time.Tick(time.Duration(plugin.PollIntervalInSecond) * time.Second)
for ts := range tickerChannel {
plugin.Harvest()
if plugin.Verbose {
log.Printf("Harvest ended at:%v\n", ts)
}
}
}
func (plugin *NewrelicPlugin) AddComponent(component IComponent) {
plugin.ComponentModels = append(plugin.ComponentModels, component)
}

vendor/modules.txt

@ -305,15 +305,6 @@ github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
# github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43
## explicit
github.com/yvasiyarov/go-metrics
# github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50
## explicit
github.com/yvasiyarov/gorelic
# github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f
## explicit
github.com/yvasiyarov/newrelic_platform_go
# go.opencensus.io v0.24.0
## explicit; go 1.13
go.opencensus.io