* Speed up testing

* Make notification run in the background; this reduces the test_readme time from 18s to 0.10s.

* Reduce the time for zone reload.

* TestServeDNSConcurrent: removed entirely. It took a whopping 58s for ... ? A few minutes of staring didn't reveal what it actually tests, and making the values smaller revealed race conditions in the tests.

* Move many interval values to variables so we can reset them to short values for the tests (see the sketch after this message).

* test_large_axfr: make the zone smaller. The 64K used has no rationale; make it 64K/10 to speed up.

* TestProxyThreeWay: use a client with a shorter timeout.

A few random tidbits in other tests.

Total time saved: 177s (almost 3m), which makes it worthwhile again to run the tests locally.

This branch:

~~~
ok      github.com/coredns/coredns/test 10.437s
cd plugin; time go t ./...
5,51s user 7,51s system 11,15s elapsed 744%CPU (
~~~

master:

~~~
ok      github.com/coredns/coredns/test 35.252s
cd plugin; time go t ./...
157,64s user 15,39s system 50,05s elapsed 345%CPU ()
~~~

tests/:   -25s
plugins/: -40s

This brings the total to 20s, and another 10s can be saved by fixing dnstapio. Getting this to 5s would be even better, but 10s is also nice.

Signed-off-by: Miek Gieben <miek@miek.nl>

* Also 0.01

Signed-off-by: Miek Gieben <miek@miek.nl>
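The "intervals as variables" pattern is what `hcInterval` at the bottom of this file implements. A minimal sketch of how a test can exploit it (the test name and body are hypothetical; only `hcInterval` comes from this file):

~~~
package forward

import (
	"testing"
	"time"
)

// TestHealthcheckFast shrinks the package-level interval so the test runs
// in milliseconds, restoring the original value when it finishes.
func TestHealthcheckFast(t *testing.T) {
	save := hcInterval
	hcInterval = 10 * time.Millisecond
	defer func() { hcInterval = save }()

	// ... exercise code that waits on hcInterval ...
}
~~~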

package forward

import (
	"crypto/tls"
	"runtime"
	"sync/atomic"
	"time"

	"github.com/coredns/coredns/plugin/pkg/up"
)

// Proxy defines an upstream host.
type Proxy struct {
	fails uint32
	addr  string

	transport *Transport

	// health checking
	probe  *up.Probe
	health HealthChecker
}

// NewProxy returns a new proxy.
func NewProxy(addr, trans string) *Proxy {
	p := &Proxy{
		addr:      addr,
		fails:     0,
		probe:     up.New(),
		transport: newTransport(addr),
	}
	p.health = NewHealthChecker(trans, true)
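	// Ensure the transport is stopped once this Proxy is garbage
	// collected: finalizer (below) calls p.transport.Stop().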
	runtime.SetFinalizer(p, (*Proxy).finalizer)
	return p
}

// SetTLSConfig sets the TLS config in the lower p.transport and in the healthchecking client.
func (p *Proxy) SetTLSConfig(cfg *tls.Config) {
	p.transport.SetTLSConfig(cfg)
	p.health.SetTLSConfig(cfg)
}

// SetExpire sets the expire duration in the lower p.transport.
func (p *Proxy) SetExpire(expire time.Duration) { p.transport.SetExpire(expire) }

// Healthcheck kicks off a round of health checks for this proxy.
func (p *Proxy) Healthcheck() {
	if p.health == nil {
		log.Warning("No healthchecker")
		return
	}

	p.probe.Do(func() error {
		return p.health.Check(p)
	})
}

// Down returns true if this proxy is down, i.e. has *more* fails than maxfails.
func (p *Proxy) Down(maxfails uint32) bool {
	if maxfails == 0 {
		return false
	}

	fails := atomic.LoadUint32(&p.fails)
	return fails > maxfails
}
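
// The fails counter read in Down is written elsewhere in this package. A
// minimal sketch of the matching race-free updates, using hypothetical
// helper names (incrementFails and resetFails are illustrative, not part
// of this file's API):
func (p *Proxy) incrementFails() { atomic.AddUint32(&p.fails, 1) }
func (p *Proxy) resetFails()     { atomic.StoreUint32(&p.fails, 0) }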

// stop stops the health checking goroutine.
func (p *Proxy) stop()      { p.probe.Stop() }
func (p *Proxy) finalizer() { p.transport.Stop() }

// start starts the proxy's healthchecking and its transport.
func (p *Proxy) start(duration time.Duration) {
	p.probe.Start(duration)
	p.transport.Start()
}

const (
	maxTimeout = 2 * time.Second
)
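
// hcInterval is a variable rather than a constant so tests can override it
// with a much shorter interval, per the commit message above.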
var hcInterval = 500 * time.Millisecond