* plugin/forward: erase expired connections by timer. In the previous implementation, expired connections stayed in the cache until a new request for the same upstream/protocol arrived. If the upstream was unhealthy, that request might arrive much later or never, and all that time the expired connections held system resources (file descriptors, ephemeral ports). With this fix, expired connections and their resources are released by a timer.
* decreased the complexity of taking a connection from the cache. The list of connections is treated as a stack (LIFO queue): a connection is taken from the end of the queue (the freshest connection) and returned to the end, as before. The remarkable consequence is that all connections in the stack end up ordered by their 'used' field, so the cleanup() method can find the first good (non-expired) connection with a binary search, as in the sketch below.
* fix race conditions
* minor enhancement
* add comments
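A minimal sketch of the stack-and-binary-search idea described above, assuming each cached-connection record carries the 'used' timestamp the message mentions; the names persistConn, firstGood, and cleanup are illustrative, not necessarily the ones in transport.go:

package forward // sketch only; not part of proxy.go below

import (
	"sort"
	"time"
)

// persistConn is a hypothetical cached-connection record.
type persistConn struct {
	used time.Time // set when the connection is yielded back onto the stack
}

// firstGood returns the index of the first non-expired connection in conns.
// Connections are always taken from and returned to the end of the stack,
// so conns stays ordered by 'used' (oldest first) and a binary search
// suffices: everything before the returned index has expired.
func firstGood(conns []*persistConn, expire time.Duration) int {
	now := time.Now()
	return sort.Search(len(conns), func(i int) bool {
		return now.Sub(conns[i].used) < expire
	})
}

// cleanup would run periodically from a time.Ticker, so expired connections
// are released even when no new request ever arrives for this upstream.
func cleanup(conns []*persistConn, expire time.Duration) []*persistConn {
	good := firstGood(conns, expire)
	// the real code would close conns[:good] here, freeing their
	// file descriptors and ephemeral ports
	return conns[good:]
}

Dropping the expired prefix in one slice operation is what keeps the timer-driven cleanup cheap: the search is O(log n) and the healthy tail is never scanned.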
114 lines | 2.6 KiB | Go
package forward

import (
	"crypto/tls"
	"runtime"
	"sync/atomic"
	"time"

	"github.com/coredns/coredns/plugin/pkg/up"

	"github.com/miekg/dns"
)

// Proxy defines an upstream host.
type Proxy struct {
	addr   string
	client *dns.Client

	// Connection caching
	expire    time.Duration
	transport *transport

	// health checking
	probe *up.Probe
	fails uint32

	avgRtt int64
}

// NewProxy returns a new proxy.
func NewProxy(addr string, tlsConfig *tls.Config) *Proxy {
	p := &Proxy{
		addr:      addr,
		fails:     0,
		probe:     up.New(),
		transport: newTransport(addr, tlsConfig),
		avgRtt:    int64(timeout / 2),
	}
	p.client = dnsClient(tlsConfig)

	runtime.SetFinalizer(p, (*Proxy).finalizer)
	return p
}

// Addr returns the address to forward to.
func (p *Proxy) Addr() (addr string) { return p.addr }

// dnsClient returns a client used for health checking.
func dnsClient(tlsConfig *tls.Config) *dns.Client {
	c := new(dns.Client)
	c.Net = "udp"
	// TODO(miek): this should be half of hcDuration?
	c.ReadTimeout = 1 * time.Second
	c.WriteTimeout = 1 * time.Second

	if tlsConfig != nil {
		c.Net = "tcp-tls"
		c.TLSConfig = tlsConfig
	}
	return c
}

// SetTLSConfig sets the TLS config in the lower p.transport and in the healthchecking client.
func (p *Proxy) SetTLSConfig(cfg *tls.Config) {
	p.transport.SetTLSConfig(cfg)
	p.client = dnsClient(cfg)
}

// IsTLS returns true if the proxy uses TLS.
func (p *Proxy) IsTLS() bool { return p.transport.tlsConfig != nil }

// SetExpire sets the expire duration in the lower p.transport.
func (p *Proxy) SetExpire(expire time.Duration) { p.transport.SetExpire(expire) }

// Dial connects to the host in p with the configured transport.
func (p *Proxy) Dial(proto string) (*dns.Conn, bool, error) { return p.transport.Dial(proto) }

// Yield returns the connection to the pool.
func (p *Proxy) Yield(c *dns.Conn) { p.transport.Yield(c) }

// Healthcheck kicks off a round of health checks for this proxy.
func (p *Proxy) Healthcheck() { p.probe.Do(p.Check) }

// Down returns true if this proxy is down, i.e. has *more* fails than maxfails.
func (p *Proxy) Down(maxfails uint32) bool {
	if maxfails == 0 {
		return false
	}

	fails := atomic.LoadUint32(&p.fails)
	return fails > maxfails
}

// close stops the health checking goroutine.
func (p *Proxy) close() {
	p.probe.Stop()
}

// finalizer is set on the Proxy in NewProxy and stops the transport's
// connection-managing goroutine once the proxy is garbage collected.
func (p *Proxy) finalizer() {
	p.transport.Stop()
}

// start starts the proxy's healthchecking.
func (p *Proxy) start(duration time.Duration) {
	p.probe.Start(duration)
	p.transport.Start()
}

const (
	dialTimeout = 4 * time.Second
	timeout     = 2 * time.Second
	maxTimeout  = 2 * time.Second
	minTimeout  = 10 * time.Millisecond
	hcDuration  = 500 * time.Millisecond
)
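For context, a hedged sketch of how a caller might pair Dial and Yield around a single exchange; the helper name exchange and its error handling are assumptions for illustration, not the plugin's actual request path:

// exchange is a hypothetical caller of the connection cache: Dial pops the
// freshest cached connection (or opens a new one), Yield pushes it back.
func exchange(p *Proxy, m *dns.Msg, proto string) (*dns.Msg, error) {
	conn, _, err := p.Dial(proto) // the bool result reports whether the conn came from the cache
	if err != nil {
		return nil, err
	}

	conn.SetWriteDeadline(time.Now().Add(timeout))
	if err := conn.WriteMsg(m); err != nil {
		conn.Close() // broken connections are closed, never yielded back
		return nil, err
	}

	conn.SetReadDeadline(time.Now().Add(timeout))
	ret, err := conn.ReadMsg()
	if err != nil {
		conn.Close()
		return nil, err
	}

	p.Yield(conn) // back onto the end of the stack; its 'used' time is refreshed
	return ret, nil
}

Because Yield appends to the same end of the stack that Dial pops from, the cached connections stay ordered by 'used', which is what makes the binary-search cleanup sketched above valid.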