Move *proxy* to external (#2651)

* Move *proxy* to external

Move the proxy plugin into coredns/proxy and remove it as a default
plugin. Link the proxy directive to *deprecated* in plugin.cfg.

coredns/proxy doesn't compile because of the vendoring :(

Signed-off-by: Miek Gieben <miek@miek.nl>

* Add github.com/coredns/proxy

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang 2019-03-03 23:32:38 -08:00 committed by Miek Gieben
parent dfa413af09
commit 9dd288943a
39 changed files with 53 additions and 1957 deletions


@@ -4,7 +4,7 @@
 # you can start the fuzzing with: make -f Makefile.fuzz <plugin>
 # e.g.
 #
-# make -f Makefile.fuzz proxy
+# make -f Makefile.fuzz forward
 #
 # Each plugin that wants to join the fuzzing fray only needs to add a fuzz.go that calls
 # the plugins's ServeDNS and used the plugin/pkg/fuzz for the Do function.


@@ -29,7 +29,6 @@ import (
 	_ "github.com/coredns/coredns/plugin/metrics"
 	_ "github.com/coredns/coredns/plugin/nsid"
 	_ "github.com/coredns/coredns/plugin/pprof"
-	_ "github.com/coredns/coredns/plugin/proxy"
 	_ "github.com/coredns/coredns/plugin/reload"
 	_ "github.com/coredns/coredns/plugin/rewrite"
 	_ "github.com/coredns/coredns/plugin/root"
@@ -39,5 +38,6 @@ import (
 	_ "github.com/coredns/coredns/plugin/tls"
 	_ "github.com/coredns/coredns/plugin/trace"
 	_ "github.com/coredns/coredns/plugin/whoami"
+	_ "github.com/coredns/proxy"
 	_ "github.com/mholt/caddy/onevent"
 )
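
For context, the blank import added above is all the main binary needs: each plugin registers itself from an init function, so pulling in the package wires up the directive. A sketch of that registration, mirroring the setup.go this commit deletes further down:

~~~ go
package proxy

import "github.com/mholt/caddy"

// init registers the "proxy" directive for the DNS server type, so a
// blank import of this package is enough to activate the plugin.
func init() {
	caddy.RegisterPlugin("proxy", caddy.Plugin{
		ServerType: "dns",
		Action:     setup, // setup (in this package) parses the proxy stanza
	})
}
~~~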

go.mod

@@ -8,6 +8,7 @@ require (
 	github.com/Shopify/sarama v1.17.0
 	github.com/apache/thrift v0.12.0
 	github.com/aws/aws-sdk-go v1.14.17
+	github.com/coredns/proxy v0.0.0-20190303110311-afc937d015b9
 	github.com/coreos/etcd v3.3.11+incompatible
 	github.com/davecgh/go-spew v1.1.0
 	github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11

go.sum

@@ -10,6 +10,8 @@ github.com/aws/aws-sdk-go v1.14.17 h1:T6YqsDgkg5WA00ZOpPU701dNhlpcS/HN1cW0VpvG0M
 github.com/aws/aws-sdk-go v1.14.17/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/coredns/proxy v0.0.0-20190303110311-afc937d015b9 h1:Y1pVzULwQG8rsh4jFpxaVZfaR8qTfM5VtweoBvZ56Vs=
+github.com/coredns/proxy v0.0.0-20190303110311-afc937d015b9/go.mod h1:BXx1AhWPTL4TJqe70SRiT1MT41j8yWJGU6kEInPiDAA=
 github.com/coreos/etcd v3.3.11+incompatible h1:0gCnqKsq7XxMi69JsnbmMc1o+RJH3XH64sV9aiTTYko=
 github.com/coreos/etcd v3.3.11+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=


@@ -1,244 +0,0 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-PROXY" "7" "March 2019" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIproxy\fR \- facilitates both a basic reverse proxy and a robust load balancer\.
.
.SH "DESCRIPTION"
The proxy has support for multiple backends\. The load balancing features include multiple policies, health checks, and failovers\. If all hosts fail their health check the proxy plugin will fail back to randomly selecting a target and sending packets to it\.
.
.SH "SYNTAX"
In its most basic form, a simple reverse proxy uses this syntax:
.
.IP "" 4
.
.nf
proxy FROM TO
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR is the base domain to match for the request to be proxied\.
.
.IP "\(bu" 4
\fBTO\fR is the destination endpoint to proxy to\.
.
.IP "" 0
.
.P
However, advanced features including load balancing can be utilized with an expanded syntax:
.
.IP "" 4
.
.nf
proxy FROM TO\.\.\. {
policy random|least_conn|round_robin|sequential
fail_timeout DURATION
max_fails INTEGER
health_check PATH:PORT [DURATION]
except IGNORED_NAMES\.\.\.
spray
protocol [dns [force_tcp]|grpc [insecure|CACERT|KEY CERT|KEY CERT CACERT]]
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR is the name to match for the request to be proxied\.
.
.IP "\(bu" 4
\fBTO\fR is the destination endpoint to proxy to\. At least one is required, but multiple may be specified\. \fBTO\fR may be an IP:Port pair, or may reference a file in resolv\.conf format
.
.IP "\(bu" 4
\fBpolicy\fR is the load balancing policy to use; applies only with multiple backends\. May be one of random, least_conn, round_robin or sequential\. Default is random\.
.
.IP "\(bu" 4
\fBfail_timeout\fR specifies how long to consider a backend as down after it has failed\. While it is down, requests will not be routed to that backend\. A backend is "down" if CoreDNS fails to communicate with it\. The default value is 2 seconds ("2s")\.
.
.IP "\(bu" 4
\fBmax_fails\fR is the number of failures within fail_timeout that are needed before considering a backend to be down\. If 0, the backend will never be marked as down\. Default is 1\.
.
.IP "\(bu" 4
\fBhealth_check\fR will check \fBPATH\fR (on \fBPORT\fR) on each backend\. If a backend returns a status code of 200\-399, then that backend is marked healthy for double the healthcheck duration\. If it doesn\'t, it is marked as unhealthy and no requests are routed to it\. If this option is not provided then health checks are disabled\. The default duration is 4 seconds ("4s")\.
.
.IP "\(bu" 4
\fBIGNORED_NAMES\fR in \fBexcept\fR is a space\-separated list of domains to exclude from proxying\. Requests that match none of these names will be passed through\.
.
.IP "\(bu" 4
\fBspray\fR when all backends are unhealthy, randomly pick one to send the traffic to\. (This is a failsafe\.)
.
.IP "\(bu" 4
\fBprotocol\fR specifies what protocol to use to speak to an upstream, \fBdns\fR (the default) is plain old DNS\. The \fBgrpc\fR option will talk to a server that has implemented the DnsService \fIhttps://github\.com/coredns/coredns/blob/master/pb/dns\.proto\fR\.
.
.IP "" 0
.
.SH "POLICIES"
There are four load\-balancing policies available:
.
.IP "\(bu" 4
\fBrandom\fR (default) \- Randomly select a backend
.
.IP "\(bu" 4
\fBleast_conn\fR \- Select the backend with the fewest active connections
.
.IP "\(bu" 4
\fBround_robin\fR \- Select the backend in round\-robin fashion
.
.IP "\(bu" 4
\fBsequential\fR \- Select the first available backend looking by order of declaration from left to right
.
.IP "\(bu" 4
\fBfirst\fR \- Deprecated\. Use sequential instead
.
.IP "" 0
.
.P
All polices implement randomly spraying packets to backend hosts when \fIno healthy\fR hosts are available\. This is to preempt the case where the healthchecking (as a mechanism) fails\.
.
.SH "UPSTREAM PROTOCOLS"
.
.TP
\fBdns\fR
uses the standard DNS exchange\. You can pass \fBforce_tcp\fR to make sure that the proxied connection is performed over TCP, regardless of the inbound request\'s protocol\.
.
.TP
\fBgrpc\fR
extra options are used to control how the TLS connection is made to the gRPC server\.
.
.IP "\(bu" 4
None \- No client authentication is used, and the system CAs are used to verify the server certificate\.
.
.IP "\(bu" 4
\fBinsecure\fR \- TLS is not used, the connection is made in plaintext (not good in production)\.
.
.IP "\(bu" 4
\fBCACERT\fR \- No client authentication is used, and the file \fBCACERT\fR is used to verify the server certificate\.
.
.IP "\(bu" 4
\fBKEY\fR \fBCERT\fR \- Client authentication is used with the specified key/cert pair\. The server certificate is verified with the system CAs\.
.
.IP "\(bu" 4
\fBKEY\fR \fBCERT\fR \fBCACERT\fR \- Client authentication is used with the specified key/cert pair\. The server certificate is verified using the \fBCACERT\fR file\.
.
.IP "" 0
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metric is exported:
.
.IP "\(bu" 4
\fBcoredns_proxy_request_duration_seconds{server, proto, proto_proxy, family, to}\fR \- duration per upstream interaction\.
.
.IP "\(bu" 4
\fBcoredns_proxy_request_count_total{server, proto, proto_proxy, family, to}\fR \- query count per upstream\.
.
.IP "" 0
.
.P
Where \fBproxy_proto\fR is the protocol used (\fBdns\fR or \fBgrpc\fR) and \fBto\fR is \fBTO\fR specified in the config, \fBproto\fR is the protocol used by the incoming query ("tcp" or "udp"), family the transport family ("1" for IPv4, and "2" for IPv6)\. \fBServer\fR is the server responsible for the request (and metric)\. See the documentation in the metrics plugin\.
.
.SH "EXAMPLES"
Proxy all requests within example\.org\. to a backend system:
.
.IP "" 4
.
.nf
proxy example\.org 127\.0\.0\.1:9005
.
.fi
.
.IP "" 0
.
.P
Load\-balance all requests between three backends (using random policy):
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.10:53 10\.0\.0\.11:1053 10\.0\.0\.12
}
.
.fi
.
.IP "" 0
.
.P
Same as above, but round\-robin style:
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.10:53 10\.0\.0\.11:1053 10\.0\.0\.12 {
policy round_robin
}
}
.
.fi
.
.IP "" 0
.
.P
With health checks and proxy headers to pass hostname, IP, and scheme upstream:
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.11:53 10\.0\.0\.11:53 10\.0\.0\.12:53 {
policy round_robin
health_check /health:8080
}
}
.
.fi
.
.IP "" 0
.
.P
Proxy everything except requests to miek\.nl or example\.org
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.10:1234 {
except miek\.nl example\.org
}
}
.
.fi
.
.IP "" 0
.
.P
Proxy everything except \fBexample\.org\fR using the host\'s \fBresolv\.conf\fR\'s nameservers:
.
.IP "" 4
.
.nf
\&\. {
proxy \. /etc/resolv\.conf {
except example\.org
}
}
.
.fi
.
.IP "" 0


@@ -51,7 +51,7 @@ secondary:secondary
 etcd:etcd
 loop:loop
 forward:forward
-proxy:proxy
+proxy:github.com/coredns/proxy
 erratic:erratic
 whoami:whoami
 on:github.com/mholt/caddy/onevent


@@ -112,7 +112,7 @@ to appear was the *reverse* plugin that synthesis PTR and A/AAAA responses (usef
 The nature of the *reverse* plugin is such that it only deals with A,AAAA and PTR and then only
 for a subset of the names. Ideally you would want to layer *reverse* **in front off** another
-plugin such as *file* or *auto* (or even *proxy*). This means *reverse* handles some special
+plugin such as *file* or *auto* (or even *forward*). This means *reverse* handles some special
 reverse cases and **all other** request are handled by the backing plugin. This is exactly what
 "fallthrough" does. To keep things explicit we've opted that plugins implement such behavior
 should implement a `fallthrough` keyword.


@@ -88,13 +88,14 @@ Proxy to Google Public DNS and only cache responses for example.org (or below).
 ~~~ corefile
 . {
-    proxy . 8.8.8.8:53
+    forward . 8.8.8.8:53
     cache example.org
 }
 ~~~
 
 Enable caching for all zones, keep a positive cache size of 5000 and a negative cache size of 2500:
 
 ~~~ corefile
 . {
     cache {
         success 5000


@@ -10,7 +10,7 @@ The data in etcd instance has to be encoded as
 a [message](https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26)
 like [SkyDNS](https://github.com/skynetservices/skydns). It should also work just like SkyDNS.
 
-The etcd plugin makes extensive use of the proxy plugin to forward and query other servers in the
+The etcd plugin makes extensive use of the forward plugin to forward and query other servers in the
 network.
 
 ## Syntax
@@ -46,7 +46,7 @@ etcd [ZONES...] {
 * `credentials` is used to set the **USERNAME** and **PASSWORD** for accessing the etcd cluster.
 * `upstream` upstream resolvers to be used resolve external names found in etcd (think CNAMEs)
   pointing to external names. If you want CoreDNS to act as a proxy for clients, you'll need to add
-  the proxy plugin. If no **ADDRESS** is given, CoreDNS will resolve CNAMEs against itself.
+  the *forward* plugin. If no **ADDRESS** is given, CoreDNS will resolve CNAMEs against itself.
   **ADDRESS** can be an IP address, and IP:port or a string pointing to a file that is structured
   as /etc/resolv.conf.
 * `tls` followed by:
@@ -85,7 +85,7 @@ This is the default SkyDNS setup, with everything specified in full:
     prometheus
     cache 160 skydns.local
     loadbalance
-    proxy . 8.8.8.8:53 8.8.4.4:53
+    forward . 8.8.8.8:53 8.8.4.4:53
 }
 ~~~
@@ -99,7 +99,7 @@ when resolving external pointing CNAMEs.
         upstream
     }
     cache 160 skydns.local
-    proxy . /etc/resolv.conf
+    forward . /etc/resolv.conf
 }
 ~~~


@@ -39,8 +39,6 @@ func setup(c *caddy.Controller) error {
 
 func etcdParse(c *caddy.Controller) (*Etcd, error) {
 	etc := Etcd{
-		// Don't default to a proxy for lookups.
-		// Proxy:      proxy.NewLookup([]string{"8.8.8.8:53", "8.8.4.4:53"}),
 		PathPrefix: "skydns",
 		Ctx:        context.Background(),
 	}


@@ -14,7 +14,7 @@ cluster. See the [deployment](https://github.com/coredns/deployment) repository
 to deploy CoreDNS in Kubernetes](https://github.com/coredns/deployment/tree/master/kubernetes).
 
 [stubDomains and upstreamNameservers](https://kubernetes.io/blog/2017/04/configuring-private-dns-zones-upstream-nameservers-kubernetes/)
-are implemented via the *proxy* plugin and kubernetes *upstream*. See example below.
+are implemented via the *forward* plugin and kubernetes *upstream*. See the examples below.
 
 This plugin can only be used once per Server Block.
@@ -147,28 +147,33 @@ kubernetes cluster.local {
 ## stubDomains and upstreamNameservers
 
-Here we use the *proxy* plugin to implement a stubDomain that forwards `example.local` to the nameserver `10.100.0.10:53`.
+Here we use the *forward* plugin to implement a stubDomain that forwards `example.local` to the nameserver `10.100.0.10:53`.
 The *upstream* option in the *kubernetes* plugin means that ExternalName services (CNAMEs) will be resolved using the respective proxy.
 Also configured is an upstreamNameserver `8.8.8.8:53` that will be used for resolving names that do not fall in `cluster.local`
 or `example.local`.
 
 ~~~ txt
-.:53 {
+cluster.local:53 {
     kubernetes cluster.local {
         upstream
     }
-    proxy example.local 10.100.0.10:53
-    proxy . 8.8.8.8:53
+}
+example.local {
+    forward . 10.100.0.10:53
+}
+. {
+    forward . 8.8.8.8:53
 }
 ~~~
 
 The configuration above represents the following Kube-DNS stubDomains and upstreamNameservers configuration.
 
 ~~~ txt
 stubDomains: |
   {“example.local”: [“10.100.0.10:53”]}
 upstreamNameservers: |
   [“8.8.8.8:53”]
 ~~~
 
 ## AutoPath


@@ -60,10 +60,10 @@ A forwarding loop is usually caused by:
 * Most commonly, CoreDNS forwarding requests directly to itself. e.g. via a loopback address such as `127.0.0.1`, `::1` or `127.0.0.53`
 * Less commonly, CoreDNS forwarding to an upstream server that in turn, forwards requests back to CoreDNS.
 
-To troubleshoot this problem, look in your Corefile for any `proxy` or `forward` to the zone
+To troubleshoot this problem, look in your Corefile for any `forward`s to the zone
 in which the loop was detected. Make sure that they are not forwarding to a local address or
-to another DNS server that is forwarding requests back to CoreDNS. If `proxy` or `forward` are
+to another DNS server that is forwarding requests back to CoreDNS. If `forward` is
 using a file (e.g. `/etc/resolv.conf`), make sure that file does not contain local addresses.
 
 ### Troubleshooting Loops In Kubernetes Clusters
@@ -75,7 +75,7 @@ on the host node (e.g. `systemd-resolved`). For example, in certain configurati
 put the loopback address `127.0.0.53` as a nameserver into `/etc/resolv.conf`. Kubernetes (via `kubelet`) by default
 will pass this `/etc/resolv.conf` file to all Pods using the `default` dnsPolicy rendering them
 unable to make DNS lookups (this includes CoreDNS Pods). CoreDNS uses this `/etc/resolv.conf`
-as a list of upstreams to proxy/forward requests to. Since it contains a loopback address, CoreDNS ends up forwarding
+as a list of upstreams to forward requests to. Since it contains a loopback address, CoreDNS ends up forwarding
 requests to itself.
 
 There are many ways to work around this issue, some are listed here:
@@ -86,6 +86,6 @@ There are many ways to work around this issue, some are listed here:
   `/run/systemd/resolve/resolv.conf` is typically the location of the "real" `resolv.conf`,
   although this can be different depending on your distribution.
 * Disable the local DNS cache on host nodes, and restore `/etc/resolv.conf` to the original.
-* A quick and dirty fix is to edit your Corefile, replacing `proxy . /etc/resolv.conf` with
-  the ip address of your upstream DNS, for example `proxy . 8.8.8.8`. But this only fixes the issue for CoreDNS,
+* A quick and dirty fix is to edit your Corefile, replacing `forward . /etc/resolv.conf` with
+  the ip address of your upstream DNS, for example `forward . 8.8.8.8`. But this only fixes the issue for CoreDNS,
   kubelet will continue to forward the invalid `resolv.conf` to all `default` dnsPolicy Pods, leaving them unable to resolve DNS.
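
A Corefile sketch of the quick and dirty fix described in the last bullet (8.8.8.8 stands in for whatever upstream you actually use):

~~~ corefile
. {
    loop
    # was: forward . /etc/resolv.conf, which pointed at 127.0.0.53
    forward . 8.8.8.8
}
~~~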


@@ -1,10 +0,0 @@
reviewers:
- fturib
- grobie
- johnbelamaric
- miekg
approvers:
- fturib
- grobie
- johnbelamaric
- miekg


@@ -1,160 +0,0 @@
# proxy
## Name
*proxy* - facilitates both a basic reverse proxy and a robust load balancer.
## Description
The proxy has support for multiple backends. The load balancing features include multiple policies,
health checks, and failovers. If all hosts fail their health check the proxy plugin will fail
back to randomly selecting a target and sending packets to it.
## Syntax
In its most basic form, a simple reverse proxy uses this syntax:
~~~
proxy FROM TO
~~~
* **FROM** is the base domain to match for the request to be proxied.
* **TO** is the destination endpoint to proxy to.
However, advanced features including load balancing can be utilized with an expanded syntax:
~~~
proxy FROM TO... {
policy random|least_conn|round_robin|sequential
fail_timeout DURATION
max_fails INTEGER
health_check PATH:PORT [DURATION]
except IGNORED_NAMES...
spray
protocol [dns [force_tcp]|grpc [insecure|CACERT|KEY CERT|KEY CERT CACERT]]
}
~~~
* **FROM** is the name to match for the request to be proxied.
* **TO** is the destination endpoint to proxy to. At least one is required, but multiple may be
specified. **TO** may be an IP:Port pair, or may reference a file in resolv.conf format
* `policy` is the load balancing policy to use; applies only with multiple backends. May be one of
random, least_conn, round_robin or sequential. Default is random.
* `fail_timeout` specifies how long to consider a backend as down after it has failed. While it is
down, requests will not be routed to that backend. A backend is "down" if CoreDNS fails to
communicate with it. The default value is 2 seconds ("2s").
* `max_fails` is the number of failures within fail_timeout that are needed before considering
a backend to be down. If 0, the backend will never be marked as down. Default is 1.
* `health_check` will check **PATH** (on **PORT**) on each backend. If a backend returns a status code of
200-399, then that backend is marked healthy for double the healthcheck duration. If it doesn't,
it is marked as unhealthy and no requests are routed to it. If this option is not provided then
health checks are disabled. The default duration is 4 seconds ("4s").
* **IGNORED_NAMES** in `except` is a space-separated list of domains to exclude from proxying.
Requests that match none of these names will be passed through.
* `spray` when all backends are unhealthy, randomly pick one to send the traffic to. (This is
a failsafe.)
* `protocol` specifies what protocol to use to speak to an upstream, `dns` (the default) is plain
old DNS. The `grpc` option will talk to a server that has implemented
the [DnsService](https://github.com/coredns/coredns/blob/master/pb/dns.proto).
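A sketch combining several of these options (addresses, paths, and durations are arbitrary, for illustration only):
~~~ corefile
. {
    proxy . 10.0.0.10:53 10.0.0.11:53 {
        policy round_robin
        max_fails 3
        fail_timeout 10s
        health_check /health:8080 10s
    }
}
~~~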
## Policies
There are four load-balancing policies available:
* `random` (default) - Randomly select a backend
* `least_conn` - Select the backend with the fewest active connections
* `round_robin` - Select the backend in round-robin fashion
* `sequential` - Select the first available backend looking by order of declaration from left to right
* `first` - Deprecated. Use sequential instead
All polices implement randomly spraying packets to backend hosts when *no healthy* hosts are
available. This is to preempt the case where the healthchecking (as a mechanism) fails.
## Upstream Protocols
`dns`
: uses the standard DNS exchange. You can pass `force_tcp` to make sure that the proxied connection is performed
over TCP, regardless of the inbound request's protocol.
`grpc`
: extra options are used to control how the TLS connection is made to the gRPC server.
* None - No client authentication is used, and the system CAs are used to verify the server certificate.
* `insecure` - TLS is not used, the connection is made in plaintext (not good in production).
* **CACERT** - No client authentication is used, and the file **CACERT** is used to verify the server certificate.
* **KEY** **CERT** - Client authentication is used with the specified key/cert pair. The server
certificate is verified with the system CAs.
* **KEY** **CERT** **CACERT** - Client authentication is used with the specified key/cert pair. The
server certificate is verified using the **CACERT** file.
## Metrics
If monitoring is enabled (via the *prometheus* directive) then the following metric is exported:
* `coredns_proxy_request_duration_seconds{server, proto, proto_proxy, family, to}` - duration per
upstream interaction.
* `coredns_proxy_request_count_total{server, proto, proto_proxy, family, to}` - query count per
upstream.
Where `proxy_proto` is the protocol used (`dns` or `grpc`) and `to` is **TO**
specified in the config, `proto` is the protocol used by the incoming query ("tcp" or "udp"), family
the transport family ("1" for IPv4, and "2" for IPv6). `Server` is the server responsible for the
request (and metric). See the documentation in the metrics plugin.
## Examples
Proxy all requests within example.org. to a backend system:
~~~
proxy example.org 127.0.0.1:9005
~~~
Load-balance all requests between three backends (using random policy):
~~~ corefile
. {
proxy . 10.0.0.10:53 10.0.0.11:1053 10.0.0.12
}
~~~
Same as above, but round-robin style:
~~~ corefile
. {
proxy . 10.0.0.10:53 10.0.0.11:1053 10.0.0.12 {
policy round_robin
}
}
~~~
With health checks and proxy headers to pass hostname, IP, and scheme upstream:
~~~ corefile
. {
proxy . 10.0.0.11:53 10.0.0.11:53 10.0.0.12:53 {
policy round_robin
health_check /health:8080
}
}
~~~
Proxy everything except requests to miek.nl or example.org
~~~
. {
proxy . 10.0.0.10:1234 {
except miek.nl example.org
}
}
~~~
Proxy everything except `example.org` using the host's `resolv.conf`'s nameservers:
~~~ corefile
. {
proxy . /etc/resolv.conf {
except example.org
}
}
~~~
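With *proxy* no longer built in by default after this commit, existing Corefiles typically migrate to *forward*. A sketch of the common case (`except` and `force_tcp` carry over; *forward*'s `health_check` takes a plain duration and probes over DNS rather than HTTP, and there is no `grpc` protocol equivalent):
~~~ corefile
. {
    # before: proxy . /etc/resolv.conf { except example.org }
    forward . /etc/resolv.conf {
        except example.org
    }
}
~~~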


@@ -1,100 +0,0 @@
package proxy
import (
"context"
"net"
"time"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
type dnsEx struct {
Timeout time.Duration
Options
}
// Options define the options understood by dns.Exchange.
type Options struct {
ForceTCP bool // If true use TCP for upstream no matter what
}
func newDNSEx() *dnsEx {
return newDNSExWithOption(Options{})
}
func newDNSExWithOption(opt Options) *dnsEx {
return &dnsEx{Timeout: defaultTimeout * time.Second, Options: opt}
}
func (d *dnsEx) Transport() string {
if d.Options.ForceTCP {
return "tcp"
}
// The protocol will be determined by `state.Proto()` during Exchange.
return ""
}
func (d *dnsEx) Protocol() string { return "dns" }
func (d *dnsEx) OnShutdown(p *Proxy) error { return nil }
func (d *dnsEx) OnStartup(p *Proxy) error { return nil }
// Exchange implements the Exchanger interface.
func (d *dnsEx) Exchange(ctx context.Context, addr string, state request.Request) (*dns.Msg, error) {
proto := state.Proto()
if d.Options.ForceTCP {
proto = "tcp"
}
co, err := net.DialTimeout(proto, addr, d.Timeout)
if err != nil {
return nil, err
}
reply, _, err := d.ExchangeConn(state.Req, co)
co.Close()
if reply != nil && reply.Truncated {
// Suppress proxy error for truncated responses
err = nil
}
if err != nil {
return nil, err
}
reply.Id = state.Req.Id
return reply, nil
}
func (d *dnsEx) ExchangeConn(m *dns.Msg, co net.Conn) (*dns.Msg, time.Duration, error) {
start := time.Now()
r, err := exchange(m, co)
rtt := time.Since(start)
return r, rtt, err
}
func exchange(m *dns.Msg, co net.Conn) (*dns.Msg, error) {
opt := m.IsEdns0()
udpsize := uint16(dns.MinMsgSize)
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= dns.MinMsgSize {
udpsize = opt.UDPSize()
}
dnsco := &dns.Conn{Conn: co, UDPSize: udpsize}
writeDeadline := time.Now().Add(defaultTimeout)
dnsco.SetWriteDeadline(writeDeadline)
if err := dnsco.WriteMsg(m); err != nil {
log.Debugf("Failed to send message: %v", err)
return nil, err
}
readDeadline := time.Now().Add(defaultTimeout)
co.SetReadDeadline(readDeadline)
return dnsco.ReadMsg()
}


@@ -1,56 +0,0 @@
package proxy
import (
"context"
"time"
"github.com/coredns/coredns/plugin/dnstap"
"github.com/coredns/coredns/plugin/dnstap/msg"
"github.com/coredns/coredns/request"
tap "github.com/dnstap/golang-dnstap"
"github.com/miekg/dns"
)
func toDnstap(ctx context.Context, host string, ex Exchanger, state request.Request, reply *dns.Msg, start time.Time) error {
tapper := dnstap.TapperFromContext(ctx)
if tapper == nil {
return nil
}
// Query
b := msg.New().Time(start).HostPort(host)
t := ex.Transport()
if t == "" {
t = state.Proto()
}
if t == "tcp" {
b.SocketProto = tap.SocketProtocol_TCP
} else {
b.SocketProto = tap.SocketProtocol_UDP
}
if tapper.Pack() {
b.Msg(state.Req)
}
m, err := b.ToOutsideQuery(tap.Message_FORWARDER_QUERY)
if err != nil {
return err
}
tapper.TapMessage(m)
// Response
if reply != nil {
if tapper.Pack() {
b.Msg(reply)
}
m, err := b.Time(time.Now()).ToOutsideResponse(tap.Message_FORWARDER_RESPONSE)
if err != nil {
return err
}
tapper.TapMessage(m)
}
return nil
}


@@ -1,57 +0,0 @@
package proxy
import (
"context"
"testing"
"time"
"github.com/coredns/coredns/plugin/dnstap/msg"
"github.com/coredns/coredns/plugin/dnstap/test"
mwtest "github.com/coredns/coredns/plugin/test"
"github.com/coredns/coredns/request"
tap "github.com/dnstap/golang-dnstap"
"github.com/miekg/dns"
)
func testCase(t *testing.T, ex Exchanger, q, r *dns.Msg, datq, datr *msg.Builder) {
tapq, _ := datq.ToOutsideQuery(tap.Message_FORWARDER_QUERY)
tapr, _ := datr.ToOutsideResponse(tap.Message_FORWARDER_RESPONSE)
ctx := test.Context{}
err := toDnstap(&ctx, "10.240.0.1:40212", ex,
request.Request{W: &mwtest.ResponseWriter{}, Req: q}, r, time.Now())
if err != nil {
t.Fatal(err)
}
if len(ctx.Trap) != 2 {
t.Fatalf("Messages: %d", len(ctx.Trap))
}
if !test.MsgEqual(ctx.Trap[0], tapq) {
t.Errorf("Want: %v\nhave: %v", tapq, ctx.Trap[0])
}
if !test.MsgEqual(ctx.Trap[1], tapr) {
t.Errorf("Want: %v\nhave: %v", tapr, ctx.Trap[1])
}
}
func TestDnstap(t *testing.T) {
q := mwtest.Case{Qname: "example.org", Qtype: dns.TypeA}.Msg()
r := mwtest.Case{
Qname: "example.org.", Qtype: dns.TypeA,
Answer: []dns.RR{
mwtest.A("example.org. 3600 IN A 10.0.0.1"),
},
}.Msg()
tapq, tapr := test.TestingData(), test.TestingData()
testCase(t, newDNSEx(), q, r, tapq, tapr)
tapq.SocketProto = tap.SocketProtocol_TCP
tapr.SocketProto = tap.SocketProtocol_TCP
testCase(t, newDNSExWithOption(Options{ForceTCP: true}), q, r, tapq, tapr)
}
func TestNoDnstap(t *testing.T) {
err := toDnstap(context.TODO(), "", nil, request.Request{}, nil, time.Now())
if err != nil {
t.Fatal(err)
}
}


@@ -1,18 +0,0 @@
package proxy
import (
"sync/atomic"
"github.com/coredns/coredns/plugin/pkg/healthcheck"
)
// Default CheckDown functions for use in the proxy plugin.
var checkDownFunc = func(upstream *staticUpstream) healthcheck.UpstreamHostDownFunc {
return func(uh *healthcheck.UpstreamHost) bool {
fails := atomic.LoadInt32(&uh.Fails)
if fails >= upstream.MaxFails && upstream.MaxFails != 0 {
return true
}
return false
}
}


@@ -1,23 +0,0 @@
package proxy
import (
"context"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
// Exchanger is an interface that specifies a type implementing a DNS resolver that
// can use whatever transport it likes.
type Exchanger interface {
Exchange(ctx context.Context, addr string, state request.Request) (*dns.Msg, error)
Protocol() string
// Transport returns the only transport protocol used by this Exchanger or "".
// If the return value is "", Exchange must use `state.Proto()`.
Transport() string
OnStartup(*Proxy) error
OnShutdown(*Proxy) error
}
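
A minimal sketch of a custom Exchanger satisfying this interface (a hypothetical type, for illustration; the package's real implementations are the dns and grpc exchangers in the surrounding files):

~~~ go
// nxExchanger answers every query with NXDOMAIN instead of contacting
// an upstream; it exists only to show the Exchanger contract.
type nxExchanger struct{}

func (nxExchanger) Exchange(ctx context.Context, addr string, state request.Request) (*dns.Msg, error) {
	m := new(dns.Msg)
	m.SetRcode(state.Req, dns.RcodeNameError)
	return m, nil
}

func (nxExchanger) Protocol() string { return "nx" }

// Transport returns "", so Exchange must work for whatever state.Proto() reports.
func (nxExchanger) Transport() string { return "" }

func (nxExchanger) OnStartup(*Proxy) error  { return nil }
func (nxExchanger) OnShutdown(*Proxy) error { return nil }
~~~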


@@ -1,21 +0,0 @@
// +build fuzz
package proxy
import (
"github.com/coredns/coredns/plugin/pkg/fuzz"
"github.com/mholt/caddy"
)
// Fuzz fuzzes proxy.
func Fuzz(data []byte) int {
c := caddy.NewTestController("dns", "proxy . 8.8.8.8:53")
up, err := NewStaticUpstreams(&c.Dispenser)
if err != nil {
return 0
}
p := &Proxy{Upstreams: &up}
return fuzz.Do(p, data)
}


@@ -1,99 +0,0 @@
package proxy
import (
"context"
"crypto/tls"
"fmt"
"github.com/coredns/coredns/pb"
"github.com/coredns/coredns/plugin/pkg/trace"
"github.com/coredns/coredns/request"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/miekg/dns"
opentracing "github.com/opentracing/opentracing-go"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type grpcClient struct {
dialOpts []grpc.DialOption
clients map[string]pb.DnsServiceClient
conns []*grpc.ClientConn
upstream *staticUpstream
}
func newGrpcClient(tls *tls.Config, u *staticUpstream) *grpcClient {
g := &grpcClient{upstream: u}
if tls == nil {
g.dialOpts = append(g.dialOpts, grpc.WithInsecure())
} else {
g.dialOpts = append(g.dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tls)))
}
g.clients = map[string]pb.DnsServiceClient{}
return g
}
func (g *grpcClient) Exchange(ctx context.Context, addr string, state request.Request) (*dns.Msg, error) {
msg, err := state.Req.Pack()
if err != nil {
return nil, err
}
if cl, ok := g.clients[addr]; ok {
reply, err := cl.Query(ctx, &pb.DnsPacket{Msg: msg})
if err != nil {
return nil, err
}
d := new(dns.Msg)
err = d.Unpack(reply.Msg)
if err != nil {
return nil, err
}
return d, nil
}
return nil, fmt.Errorf("grpc exchange - no connection available for host: %s ", addr)
}
func (g *grpcClient) Transport() string { return "tcp" }
func (g *grpcClient) Protocol() string { return "grpc" }
func (g *grpcClient) OnShutdown(p *Proxy) error {
g.clients = map[string]pb.DnsServiceClient{}
for i, conn := range g.conns {
err := conn.Close()
if err != nil {
log.Warningf("Error closing connection %d: %s\n", i, err)
}
}
g.conns = []*grpc.ClientConn{}
return nil
}
func (g *grpcClient) OnStartup(p *Proxy) error {
dialOpts := g.dialOpts
if p.Trace != nil {
if t, ok := p.Trace.(trace.Trace); ok {
onlyIfParent := func(parentSpanCtx opentracing.SpanContext, method string, req, resp interface{}) bool {
return parentSpanCtx != nil
}
intercept := otgrpc.OpenTracingClientInterceptor(t.Tracer(), otgrpc.IncludingSpans(onlyIfParent))
dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(intercept))
} else {
log.Warningf("Wrong type for trace plugin reference: %s", p.Trace)
}
}
for _, host := range g.upstream.Hosts {
conn, err := grpc.Dial(host.Name, dialOpts...)
if err != nil {
log.Warningf("Skipping gRPC host '%s' due to Dial error: %s\n", host.Name, err)
} else {
g.clients[host.Name] = pb.NewDnsServiceClient(conn)
g.conns = append(g.conns, conn)
}
}
return nil
}
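
The Corefile stanza that selects this exchanger, per the README above (a sketch; `insecure` skips TLS entirely, as documented):

~~~ corefile
. {
    proxy . 127.0.0.1:443 {
        protocol grpc insecure
    }
}
~~~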


@@ -1,220 +0,0 @@
package proxy
import (
"context"
"fmt"
"testing"
"github.com/coredns/coredns/plugin/pkg/healthcheck"
"github.com/coredns/coredns/plugin/pkg/tls"
"github.com/coredns/coredns/plugin/test"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
"google.golang.org/grpc/grpclog"
)
func init() {
grpclog.SetLoggerV2(discardV2{})
}
func buildPool(size int) ([]*healthcheck.UpstreamHost, func(), error) {
ups := make([]*healthcheck.UpstreamHost, size)
srvs := []*dns.Server{}
errs := []error{}
for i := 0; i < size; i++ {
srv, addr, err := test.TCPServer("localhost:0")
if err != nil {
errs = append(errs, err)
continue
}
ups[i] = &healthcheck.UpstreamHost{Name: addr}
srvs = append(srvs, srv)
}
stopIt := func() {
for _, s := range srvs {
s.Shutdown()
}
}
if len(errs) > 0 {
go stopIt()
valErr := ""
for _, e := range errs {
valErr += fmt.Sprintf("%v\n", e)
}
return nil, nil, fmt.Errorf("error at allocation of the pool : %v", valErr)
}
return ups, stopIt, nil
}
func TestGRPCStartupShutdown(t *testing.T) {
pool, closePool, err := buildPool(2)
if err != nil {
t.Fatalf("Error creating the pool of upstream for the test : %s", err)
}
defer closePool()
upstream := &staticUpstream{
from: ".",
HealthCheck: healthcheck.HealthCheck{
Hosts: pool,
},
}
g := newGrpcClient(nil, upstream)
upstream.ex = g
p := &Proxy{}
p.Upstreams = &[]Upstream{upstream}
err = g.OnStartup(p)
if err != nil {
t.Fatalf("Error starting grpc client exchanger: %s", err)
}
if len(g.clients) != len(pool) {
t.Fatalf("Expected %d grpc clients but found %d", len(pool), len(g.clients))
}
err = g.OnShutdown(p)
if err != nil {
t.Fatalf("Error stopping grpc client exchanger: %s", err)
}
if len(g.clients) != 0 {
t.Errorf("Shutdown didn't remove clients, found %d", len(g.clients))
}
if len(g.conns) != 0 {
t.Errorf("Shutdown didn't remove conns, found %d", len(g.conns))
}
}
func TestGRPCRunAQuery(t *testing.T) {
pool, closePool, err := buildPool(2)
if err != nil {
t.Fatalf("Error creating the pool of upstream for the test : %s", err)
}
defer closePool()
upstream := &staticUpstream{
from: ".",
HealthCheck: healthcheck.HealthCheck{
Hosts: pool,
},
}
g := newGrpcClient(nil, upstream)
upstream.ex = g
p := &Proxy{}
p.Upstreams = &[]Upstream{upstream}
err = g.OnStartup(p)
if err != nil {
t.Fatalf("Error starting grpc client exchanger: %s", err)
}
// verify the client is usable, or an error is properly raised
state := request.Request{W: &test.ResponseWriter{}, Req: new(dns.Msg)}
g.Exchange(context.TODO(), "localhost:10053", state)
// verify that you have proper error if the hostname is unknwn or not registered
_, err = g.Exchange(context.TODO(), "invalid:10055", state)
if err == nil {
t.Errorf("Expecting a proper error when querying gRPC client with invalid hostname : %s", err)
}
err = g.OnShutdown(p)
if err != nil {
t.Fatalf("Error stopping grpc client exchanger: %s", err)
}
}
func TestGRPCRunAQueryOnSecureLinkWithInvalidCert(t *testing.T) {
pool, closePool, err := buildPool(1)
if err != nil {
t.Fatalf("Error creating the pool of upstream for the test : %s", err)
}
defer closePool()
upstream := &staticUpstream{
from: ".",
HealthCheck: healthcheck.HealthCheck{
Hosts: pool,
},
}
filename, rmFunc, err := test.TempFile("", aCert)
if err != nil {
t.Errorf("Error saving file : %s", err)
return
}
defer rmFunc()
tls, _ := tls.NewTLSClientConfig(filename)
// ignore error as the certificate is known valid
g := newGrpcClient(tls, upstream)
upstream.ex = g
p := &Proxy{}
p.Upstreams = &[]Upstream{upstream}
// Although dial will not work, it is not expected to have an error
err = g.OnStartup(p)
if err != nil {
t.Fatalf("Error starting grpc client exchanger: %s", err)
}
// verify that you have proper error if the hostname is unknwn or not registered
state := request.Request{W: &test.ResponseWriter{}, Req: new(dns.Msg)}
_, err = g.Exchange(context.TODO(), pool[0].Name+"-whatever", state)
if err == nil {
t.Errorf("Error in Exchange process : %s ", err)
}
err = g.OnShutdown(p)
if err != nil {
t.Fatalf("Error stopping grpc client exchanger: %s", err)
}
}
// discard is a Logger that outputs nothing.
type discardV2 struct{}
func (d discardV2) Info(args ...interface{}) {}
func (d discardV2) Infoln(args ...interface{}) {}
func (d discardV2) Infof(format string, args ...interface{}) {}
func (d discardV2) Warning(args ...interface{}) {}
func (d discardV2) Warningln(args ...interface{}) {}
func (d discardV2) Warningf(format string, args ...interface{}) {}
func (d discardV2) Error(args ...interface{}) {}
func (d discardV2) Errorln(args ...interface{}) {}
func (d discardV2) Errorf(format string, args ...interface{}) {}
func (d discardV2) Fatal(args ...interface{}) {}
func (d discardV2) Fatalln(args ...interface{}) {}
func (d discardV2) Fatalf(format string, args ...interface{}) {}
func (d discardV2) V(l int) bool { return true }
const (
aCert = `-----BEGIN CERTIFICATE-----
MIIDlDCCAnygAwIBAgIJAPaRnBJUE/FVMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTcxMTI0MTM0OTQ3WhcNMTgxMTI0MTM0OTQ3WjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAuTDeAoWS6tdZVcp/Vh3FlagbC+9Ohi5VjRXgkpcn9JopbcF5s2jpl1v+
cRpqkrmNNKLh8qOhmgdZQdh185VNe/iZ94H42qwKZ48vvnC5hLkk3MdgUT2ewgup
vZhy/Bb1bX+buCWkQa1u8SIilECMIPZHhBP4TuBUKJWK8bBEFAeUnxB5SCkX+un4
pctRlcfg8sX/ghADnp4e//YYDqex+1wQdFqM5zWhWDZAzc5Kdkyy9r+xXNfo4s1h
fI08f6F4skz1koxG2RXOzQ7OK4YxFwT2J6V72iyzUIlRGZTbYDvair/zm1kjTF1R
B1B+XLJF9oIB4BMZbekf033ZVaQ8YwIDAQABo4GGMIGDMDMGA1UdEQQsMCqHBH8A
AAGHBDR3AQGHBDR3AQCHBDR3KmSHBDR3KGSHBDR3KmWHBDR3KtIwHQYDVR0OBBYE
FFAEccLm7D/rN3fEe1fwzH7p0spAMB8GA1UdIwQYMBaAFFAEccLm7D/rN3fEe1fw
zH7p0spAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAF4zqaucNcK2
GwYfijwbbtgMqPEvbReUEXsC65riAPjksJQ9L2YxQ7K0RIugRizuD1DNQam+FSb0
cZEMEKzvMUIexbhZNFINWXY2X9yUS/oZd5pWP0WYIhn6qhmLvzl9XpxNPVzBXYWe
duMECCigU2x5tAGmFa6g/pXXOoZCBRzFXwXiuNhSyhJEEwODjLZ6vgbySuU2jso3
va4FKFDdVM16s1/RYOK5oM48XytCMB/JoYoSJHPfpt8LpVNAQEHMvPvHwuZBON/z
q8HFtDjT4pBpB8AfuzwtUZ/zJ5atwxa5+ahcqRnK2kX2RSINfyEy43FZjLlvjcGa
UIRTUJK1JKg=
-----END CERTIFICATE-----`
)


@@ -1,5 +0,0 @@
package proxy
import clog "github.com/coredns/coredns/plugin/pkg/log"
func init() { clog.Discard() }


@@ -1,36 +0,0 @@
package proxy
import (
"github.com/coredns/coredns/plugin"
"github.com/prometheus/client_golang/prometheus"
)
// Metrics the proxy plugin exports.
var (
RequestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "proxy",
Name: "request_count_total",
Help: "Counter of requests made per protocol, proxy protocol, family and upstream.",
}, []string{"server", "proto", "proxy_proto", "family", "to"})
RequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
Subsystem: "proxy",
Name: "request_duration_seconds",
Buckets: plugin.TimeBuckets,
Help: "Histogram of the time (in seconds) each request took.",
}, []string{"server", "proto", "proxy_proto", "family", "to"})
)
// familyToString returns the string form of either 1, or 2. Returns
// empty string is not a known family
func familyToString(f int) string {
if f == 1 {
return "1"
}
if f == 2 {
return "2"
}
return ""
}


@@ -1,183 +0,0 @@
// Package proxy is plugin that proxies requests.
package proxy
import (
"context"
"errors"
"fmt"
"net"
"sync/atomic"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics"
"github.com/coredns/coredns/plugin/pkg/healthcheck"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
ot "github.com/opentracing/opentracing-go"
)
var (
errUnreachable = errors.New("unreachable backend")
errInvalidProtocol = errors.New("invalid protocol")
errInvalidDomain = errors.New("invalid path for proxy")
)
// Proxy represents a plugin instance that can proxy requests to another (DNS) server.
type Proxy struct {
Next plugin.Handler
// Upstreams is a pointer to a slice, so we can update the upstream (used for Google)
// midway.
Upstreams *[]Upstream
// Trace is the Trace plugin, if it is installed
// This is used by the grpc exchanger to trace through the grpc calls
Trace plugin.Handler
}
// Upstream manages a pool of proxy upstream hosts. Select should return a
// suitable upstream host, or nil if no such hosts are available.
type Upstream interface {
// The domain name this upstream host should be routed on.
From() string
// Selects an upstream host to be routed to.
Select() *healthcheck.UpstreamHost
// Checks if subdomain is not an ignored.
IsAllowedDomain(string) bool
// Exchanger returns the exchanger to be used for this upstream.
Exchanger() Exchanger
// Stops the upstream from proxying requests to shutdown goroutines cleanly.
Stop() error
}
// tryDuration is how long to try upstream hosts; failures result in
// immediate retries until this duration ends or we get a nil host.
var tryDuration = 16 * time.Second
// ServeDNS satisfies the plugin.Handler interface.
func (p Proxy) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
var span, child ot.Span
span = ot.SpanFromContext(ctx)
state := request.Request{W: w, Req: r}
upstream := p.match(state)
if upstream == nil {
return plugin.NextOrFailure(p.Name(), p.Next, ctx, w, r)
}
for {
start := time.Now()
var reply *dns.Msg
var backendErr error
// Since Select() should give us "up" hosts, keep retrying
// hosts until timeout (or until we get a nil host).
for time.Since(start) < tryDuration {
host := upstream.Select()
if host == nil {
return dns.RcodeServerFailure, fmt.Errorf("%s: %s", errUnreachable, "no upstream host")
}
if span != nil {
child = span.Tracer().StartSpan("exchange", ot.ChildOf(span.Context()))
ctx = ot.ContextWithSpan(ctx, child)
}
atomic.AddInt64(&host.Conns, 1)
RequestCount.WithLabelValues(metrics.WithServer(ctx), state.Proto(), upstream.Exchanger().Protocol(), familyToString(state.Family()), host.Name).Add(1)
reply, backendErr = upstream.Exchanger().Exchange(ctx, host.Name, state)
atomic.AddInt64(&host.Conns, -1)
if child != nil {
child.Finish()
}
taperr := toDnstap(ctx, host.Name, upstream.Exchanger(), state, reply, start)
if backendErr == nil {
// Check if the reply is correct; if not return FormErr.
if !state.Match(reply) {
formerr := state.ErrorMessage(dns.RcodeFormatError)
w.WriteMsg(formerr)
return 0, taperr
}
w.WriteMsg(reply)
RequestDuration.WithLabelValues(metrics.WithServer(ctx), state.Proto(), upstream.Exchanger().Protocol(), familyToString(state.Family()), host.Name).Observe(time.Since(start).Seconds())
return 0, taperr
}
// A "ANY isc.org" query is being dropped by ISC's nameserver, we see this as a i/o timeout, but
// would then mark our upstream is being broken. We should not do this if we consider the error temporary.
// Of course it could really be that our upstream is broken
if oe, ok := backendErr.(*net.OpError); ok {
// Note this keeps looping and trying until tryDuration is hit, at which point our client
// might be long gone...
if oe.Timeout() {
// Our upstream's upstream is probably messing up, continue with next selected
// host - which my be the *same* one as we don't set any uh.Fails.
continue
}
}
timeout := host.FailTimeout
if timeout == 0 {
timeout = defaultFailTimeout
}
atomic.AddInt32(&host.Fails, 1)
fails := atomic.LoadInt32(&host.Fails)
go func(host *healthcheck.UpstreamHost, timeout time.Duration) {
time.Sleep(timeout)
// we may go negative here, should be rectified by the HC.
atomic.AddInt32(&host.Fails, -1)
if fails%failureCheck == 0 { // Kick off healthcheck on every third failure.
host.HealthCheckURL()
}
}(host, timeout)
}
return dns.RcodeServerFailure, fmt.Errorf("%s: %s", errUnreachable, backendErr)
}
}
func (p Proxy) match(state request.Request) (u Upstream) {
if p.Upstreams == nil {
return nil
}
longestMatch := 0
for _, upstream := range *p.Upstreams {
from := upstream.From()
if !plugin.Name(from).Matches(state.Name()) || !upstream.IsAllowedDomain(state.Name()) {
continue
}
if lf := len(from); lf > longestMatch {
longestMatch = lf
u = upstream
}
}
return u
}
// Name implements the Handler interface.
func (p Proxy) Name() string { return "proxy" }
const (
defaultFailTimeout = 2 * time.Second
defaultTimeout = 5 * time.Second
failureCheck = 3
)
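
To make `match` concrete: with the two stanzas below, a query for `www.example.org.` is routed to the first upstream because `example.org.` is a longer matching `From()` than the root zone (an illustrative sketch, not part of this diff):

~~~ corefile
. {
    proxy example.org 10.0.0.1:53
    proxy . 10.0.0.2:53
}
~~~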


@@ -1,72 +0,0 @@
package proxy
import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/mholt/caddy/caddyfile"
)
func TestStop(t *testing.T) {
config := "proxy . %s {\n health_check /healthcheck:%s %dms \n}"
tests := []struct {
intervalInMilliseconds int
numHealthcheckIntervals int
}{
{5, 1},
{5, 2},
{5, 3},
}
for i, test := range tests {
t.Run(fmt.Sprintf("Test %d", i), func(t *testing.T) {
// Set up proxy.
var counter int64
backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
r.Body.Close()
atomic.AddInt64(&counter, 1)
}))
defer backend.Close()
port := backend.URL[17:] // Remove all crap up to the port
back := backend.URL[7:] // Remove http://
c := caddyfile.NewDispenser("Testfile", strings.NewReader(fmt.Sprintf(config, back, port, test.intervalInMilliseconds)))
upstreams, err := NewStaticUpstreams(&c)
if err != nil {
t.Errorf("Test %d, expected no error. Got: %s", i, err)
}
// Give some time for healthchecks to hit the server.
time.Sleep(time.Duration(test.intervalInMilliseconds*test.numHealthcheckIntervals) * time.Millisecond)
for _, upstream := range upstreams {
if err := upstream.Stop(); err != nil {
t.Errorf("Test %d, expected no error stopping upstream, got: %s", i, err)
}
}
counterAfterShutdown := atomic.LoadInt64(&counter)
// Give some time to see if healthchecks are still hitting the server.
time.Sleep(time.Duration(test.intervalInMilliseconds*test.numHealthcheckIntervals) * time.Millisecond)
if counterAfterShutdown == 0 {
t.Errorf("Test %d, Expected healthchecks to hit test server, got none", i)
}
// health checks are in a go routine now, so one may well occur after we shutdown,
// but we only ever expect one more
counterAfterWaiting := atomic.LoadInt64(&counter)
if counterAfterWaiting > (counterAfterShutdown + 1) {
t.Errorf("Test %d, expected no more healthchecks after shutdown. got: %d healthchecks after shutdown", i, counterAfterWaiting-counterAfterShutdown)
}
})
}
}


@@ -1,21 +0,0 @@
package proxy
import (
"net"
"github.com/miekg/dns"
)
type fakeBootWriter struct {
dns.ResponseWriter
}
func (w *fakeBootWriter) LocalAddr() net.Addr {
local := net.ParseIP("127.0.0.1")
return &net.UDPAddr{IP: local, Port: 53} // Port is not used here
}
func (w *fakeBootWriter) RemoteAddr() net.Addr {
remote := net.ParseIP("8.8.8.8")
return &net.UDPAddr{IP: remote, Port: 53} // Port is not used here
}


@@ -1,53 +0,0 @@
package proxy
import (
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/mholt/caddy"
)
var log = clog.NewWithPlugin("proxy")
func init() {
caddy.RegisterPlugin("proxy", caddy.Plugin{
ServerType: "dns",
Action: setup,
})
}
func setup(c *caddy.Controller) error {
upstreams, err := NewStaticUpstreams(&c.Dispenser)
if err != nil {
return plugin.Error("proxy", err)
}
t := dnsserver.GetConfig(c).Handler("trace")
P := &Proxy{Trace: t}
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
P.Next = next
P.Upstreams = &upstreams
return P
})
c.OnStartup(func() error {
metrics.MustRegister(c, RequestCount, RequestDuration)
return nil
})
for i := range upstreams {
u := upstreams[i]
c.OnStartup(func() error {
return u.Exchanger().OnStartup(P)
})
c.OnShutdown(func() error {
return u.Exchanger().OnShutdown(P)
})
// Register shutdown handlers.
c.OnShutdown(u.Stop)
}
return nil
}


@@ -1,203 +0,0 @@
package proxy
import (
"fmt"
"net"
"strconv"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/pkg/healthcheck"
"github.com/coredns/coredns/plugin/pkg/parse"
"github.com/coredns/coredns/plugin/pkg/tls"
"github.com/mholt/caddy/caddyfile"
"github.com/miekg/dns"
)
type staticUpstream struct {
from string
healthcheck.HealthCheck
IgnoredSubDomains []string
ex Exchanger
}
// NewStaticUpstreams parses the configuration input and sets up
// static upstreams for the proxy plugin.
func NewStaticUpstreams(c *caddyfile.Dispenser) ([]Upstream, error) {
var upstreams []Upstream
for c.Next() {
u, err := NewStaticUpstream(c)
if err != nil {
return upstreams, err
}
upstreams = append(upstreams, u)
}
return upstreams, nil
}
// NewStaticUpstream parses the configuration of a single upstream
// starting from the FROM
func NewStaticUpstream(c *caddyfile.Dispenser) (Upstream, error) {
upstream := &staticUpstream{
from: ".",
HealthCheck: healthcheck.HealthCheck{
FailTimeout: 5 * time.Second,
MaxFails: 3,
},
ex: newDNSEx(),
}
if !c.Args(&upstream.from) {
return upstream, c.ArgErr()
}
upstream.from = plugin.Host(upstream.from).Normalize()
to := c.RemainingArgs()
if len(to) == 0 {
return upstream, c.ArgErr()
}
// process the host list, substituting in any nameservers in files
toHosts, err := parse.HostPortOrFile(to...)
if err != nil {
return upstream, err
}
if len(toHosts) > max {
return upstream, fmt.Errorf("more than %d TOs configured: %d", max, len(toHosts))
}
for c.NextBlock() {
if err := parseBlock(c, upstream); err != nil {
return upstream, err
}
}
upstream.Hosts = make([]*healthcheck.UpstreamHost, len(toHosts))
for i, host := range toHosts {
uh := &healthcheck.UpstreamHost{
Name: host,
FailTimeout: upstream.FailTimeout,
CheckDown: checkDownFunc(upstream),
}
upstream.Hosts[i] = uh
}
upstream.Start()
return upstream, nil
}
func parseBlock(c *caddyfile.Dispenser, u *staticUpstream) error {
switch c.Val() {
case "policy":
if !c.NextArg() {
return c.ArgErr()
}
policyCreateFunc, ok := healthcheck.SupportedPolicies[c.Val()]
if !ok {
return c.ArgErr()
}
u.Policy = policyCreateFunc()
case "fail_timeout":
if !c.NextArg() {
return c.ArgErr()
}
dur, err := time.ParseDuration(c.Val())
if err != nil {
return err
}
u.FailTimeout = dur
case "max_fails":
if !c.NextArg() {
return c.ArgErr()
}
n, err := strconv.Atoi(c.Val())
if err != nil {
return err
}
u.MaxFails = int32(n)
case "health_check":
if !c.NextArg() {
return c.ArgErr()
}
var err error
u.HealthCheck.Path, u.HealthCheck.Port, err = net.SplitHostPort(c.Val())
if err != nil {
return err
}
u.HealthCheck.Interval = 4 * time.Second
if c.NextArg() {
dur, err := time.ParseDuration(c.Val())
if err != nil {
return err
}
u.HealthCheck.Interval = dur
}
case "except":
ignoredDomains := c.RemainingArgs()
if len(ignoredDomains) == 0 {
return c.ArgErr()
}
for i := 0; i < len(ignoredDomains); i++ {
ignoredDomains[i] = plugin.Host(ignoredDomains[i]).Normalize()
}
u.IgnoredSubDomains = ignoredDomains
case "spray":
u.Spray = &healthcheck.Spray{}
case "protocol":
encArgs := c.RemainingArgs()
if len(encArgs) == 0 {
return c.ArgErr()
}
switch encArgs[0] {
case "dns":
if len(encArgs) > 1 {
if encArgs[1] == "force_tcp" {
opts := Options{ForceTCP: true}
u.ex = newDNSExWithOption(opts)
} else {
return fmt.Errorf("only force_tcp allowed as parameter to dns")
}
} else {
u.ex = newDNSEx()
}
case "grpc":
if len(encArgs) == 2 && encArgs[1] == "insecure" {
u.ex = newGrpcClient(nil, u)
return nil
}
tls, err := tls.NewTLSConfigFromArgs(encArgs[1:]...)
if err != nil {
return err
}
u.ex = newGrpcClient(tls, u)
default:
return fmt.Errorf("%s: %s", errInvalidProtocol, encArgs[0])
}
default:
return c.Errf("unknown property '%s'", c.Val())
}
return nil
}
func (u *staticUpstream) IsAllowedDomain(name string) bool {
if dns.Name(name) == dns.Name(u.From()) {
return true
}
for _, ignoredSubDomain := range u.IgnoredSubDomains {
if plugin.Name(ignoredSubDomain).Matches(name) {
return false
}
}
return true
}
func (u *staticUpstream) Exchanger() Exchanger { return u.ex }
func (u *staticUpstream) From() string { return u.from }
const max = 15


@@ -1,327 +0,0 @@
package proxy
import (
"path/filepath"
"strings"
"testing"
"github.com/coredns/coredns/plugin/test"
"github.com/mholt/caddy"
)
func TestAllowedDomain(t *testing.T) {
upstream := &staticUpstream{
from: "miek.nl.",
IgnoredSubDomains: []string{"download.miek.nl.", "static.miek.nl."}, // closing dot mandatory
}
tests := []struct {
name string
expected bool
}{
{"miek.nl.", true},
{"download.miek.nl.", false},
{"static.miek.nl.", false},
{"blaat.miek.nl.", true},
}
for i, test := range tests {
isAllowed := upstream.IsAllowedDomain(test.name)
if test.expected != isAllowed {
t.Errorf("Test %d: expected %v found %v for %s", i+1, test.expected, isAllowed, test.name)
}
}
}

func TestProxyParse(t *testing.T) {
rmFunc, cert, key, ca := getPEMFiles(t)
defer rmFunc()
grpc1 := "proxy . 8.8.8.8:53 {\n protocol grpc " + ca + "\n}"
grpc2 := "proxy . 8.8.8.8:53 {\n protocol grpc " + cert + " " + key + "\n}"
grpc3 := "proxy . 8.8.8.8:53 {\n protocol grpc " + cert + " " + key + " " + ca + "\n}"
grpc4 := "proxy . 8.8.8.8:53 {\n protocol grpc " + key + "\n}"
tests := []struct {
inputUpstreams string
shouldErr bool
}{
{
`proxy . 8.8.8.8:53`,
false,
},
{
`proxy 10.0.0.0/24 8.8.8.8:53`,
false,
},
{
`
proxy . 8.8.8.8:53 {
policy round_robin
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
fail_timeout 5s
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
max_fails 10
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
health_check /health:8080
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
except miek.nl example.org 10.0.0.0/24
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
spray
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
error_option
}`,
true,
},
{
`
proxy . some_bogus_filename`,
true,
},
{
`
proxy . 8.8.8.8:53 {
protocol dns
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
protocol grpc
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
protocol grpc insecure
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
protocol dns force_tcp
}`,
false,
},
{
`
proxy . 8.8.8.8:53 {
protocol grpc a b c d
}`,
true,
},
{
grpc1,
false,
},
{
grpc2,
false,
},
{
grpc3,
false,
},
{
grpc4,
true,
},
{
`
proxy . 8.8.8.8:53 {
protocol foobar
}`,
true,
},
{
`proxy`,
true,
},
{
`
proxy . 8.8.8.8:53 {
protocol foobar
}`,
true,
},
{
`
proxy . 8.8.8.8:53 {
policy
}`,
true,
},
{
`
proxy . 8.8.8.8:53 {
fail_timeout
}`,
true,
},
{
`
proxy . 8.8.8.8:53 {
fail_timeout junky
}`,
true,
},
{
`
proxy . 8.8.8.8:53 {
health_check
}`,
true,
},
{
`
proxy . 8.8.8.8:53 {
protocol dns force
}`,
true,
},
}
for i, test := range tests {
c := caddy.NewTestController("dns", test.inputUpstreams)
_, err := NewStaticUpstreams(&c.Dispenser)
if (err != nil) != test.shouldErr {
t.Errorf("Test %d expected no error, got %v for %s", i+1, err, test.inputUpstreams)
}
}
}

func TestResolvParse(t *testing.T) {
tests := []struct {
inputUpstreams string
filedata string
shouldErr bool
expected []string
}{
{
`
proxy . FILE
`,
`
nameserver 1.2.3.4
nameserver 4.3.2.1
`,
false,
[]string{"1.2.3.4:53", "4.3.2.1:53"},
},
{
`
proxy example.com 1.1.1.1:5000
proxy . FILE
proxy example.org 2.2.2.2:1234
`,
`
nameserver 1.2.3.4
`,
false,
[]string{"1.1.1.1:5000", "1.2.3.4:53", "2.2.2.2:1234"},
},
{
`
proxy example.com 1.1.1.1:5000
proxy . FILE
proxy example.org 2.2.2.2:1234
`,
`
junky resolv.conf
`,
false,
[]string{"1.1.1.1:5000", "2.2.2.2:1234"},
},
}
for i, tc := range tests {
path, rm, err := test.TempFile(".", tc.filedata)
if err != nil {
t.Fatalf("Test %d could not create temp file %v", i, err)
}
defer rm()
config := strings.Replace(tc.inputUpstreams, "FILE", path, -1)
c := caddy.NewTestController("dns", config)
upstreams, err := NewStaticUpstreams(&c.Dispenser)
if (err != nil) != tc.shouldErr {
t.Errorf("Test %d expected no error, got %v", i+1, err)
}
var hosts []string
for _, u := range upstreams {
for _, h := range u.(*staticUpstream).Hosts {
hosts = append(hosts, h.Name)
}
}
if !tc.shouldErr {
if len(hosts) != len(tc.expected) {
t.Errorf("Test %d expected %d hosts got %d", i+1, len(tc.expected), len(upstreams))
} else {
ok := true
for i, v := range tc.expected {
if v != hosts[i] {
ok = false
}
}
if !ok {
t.Errorf("Test %d expected %v got %v", i+1, tc.expected, upstreams)
}
}
}
}
}

func TestMaxTo(t *testing.T) {
// Has 16 IP addresses.
config := `proxy . 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1 1.1.1.1`
c := caddy.NewTestController("dns", config)
_, err := NewStaticUpstreams(&c.Dispenser)
if err == nil {
t.Error("Expected to many TOs configured, but nil")
}
}

func getPEMFiles(t *testing.T) (rmFunc func(), cert, key, ca string) {
tempDir, rmFunc, err := test.WritePEMFiles("")
if err != nil {
t.Fatalf("Could not write PEM files: %s", err)
}
cert = filepath.Join(tempDir, "cert.pem")
key = filepath.Join(tempDir, "key.pem")
ca = filepath.Join(tempDir, "ca.pem")
return
}
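
TestResolvParse above hinges on `parse.HostPortOrFile` (called at the top of the upstream setup code) expanding a resolv.conf path into host:port pairs. A minimal sketch of that behavior, assuming the `plugin/pkg/parse` API exactly as used in the code above:

~~~ go
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	// A plain address gets the default DNS port appended; a file path is
	// read as a resolv.conf and replaced by its nameserver entries.
	hosts, err := parse.HostPortOrFile("8.8.8.8", "/etc/resolv.conf")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(hosts) // e.g. [8.8.8.8:53 1.2.3.4:53]
}
~~~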


@ -92,7 +92,7 @@ The `.invalid` domain is a reserved TLD (see [RFC 2606 Reserved Top Level DNS Na
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    template ANY ANY invalid {
        rcode NXDOMAIN
@ -116,7 +116,7 @@ path (`dc1.example.com`) added.
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    template IN ANY example.com.dc1.example.com {
        rcode NXDOMAIN
@ -129,7 +129,7 @@ A more verbose regex based equivalent would be
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    template IN ANY example.com {
        match "example\.com\.(dc1\.example\.com\.)$"
@ -146,7 +146,7 @@ The regex-based version can do more complex matching/templating while zone-based
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    # ip-a-b-c-d.example A a.b.c.d
@ -178,7 +178,7 @@ Fallthrough is needed for mixed domains where only some responses are templated.
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    template IN A example {
        match "^ip-(?P<a>10)-(?P<b>[0-9]*)-(?P<c>[0-9]*)-(?P<d>[0-9]*)[.]dc[.]example[.]$"
@ -195,7 +195,7 @@ Named capture groups can be used to template one response for multiple patterns.
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    template IN A example {
        match ^ip-10-(?P<b>[0-9]*)-(?P<c>[0-9]*)-(?P<d>[0-9]*)[.]example[.]$
@ -215,7 +215,7 @@ Named capture groups can be used to template one response for multiple patterns.
~~~ corefile
. {
-    proxy . 8.8.8.8
+    forward . 8.8.8.8
    template IN A example {
        match ^ip-10-(?P<b>[0-9]*)-(?P<c>[0-9]*)-(?P<d>[0-9]*)[.]example[.]$


@ -10,9 +10,6 @@ CoreDNS supports queries that are encrypted using TLS (DNS over Transport Layer
or are using gRPC (https://grpc.io/, not an IETF standard). Normally DNS traffic isn't encrypted at
all (DNSSEC only signs resource records).

-The *proxy* plugin also supports gRPC (`protocol grpc`), meaning you can chain CoreDNS servers
-using this protocol.
-
The *tls* "plugin" allows you to configure the cryptographic keys that are needed for both
DNS-over-TLS and DNS-over-gRPC. If the `tls` directive is omitted, then no encryption takes place.
@ -35,7 +32,7 @@ nameservers defined in `/etc/resolv.conf` to resolve the query. This proxy path
~~~
tls://.:5553 {
    tls cert.pem key.pem ca.pem
-    proxy . /etc/resolv.conf
+    forward . /etc/resolv.conf
}
~~~
@ -45,7 +42,7 @@ incoming queries.
~~~
grpc://. {
    tls cert.pem key.pem ca.pem
-    proxy . /etc/resolv.conf
+    forward . /etc/resolv.conf
}
~~~
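
Note that *forward* can also dial the upstream itself over DNS-over-TLS; a hedged sketch of such a chain (the upstream address and `tls_servername` value are examples, not part of this change):

~~~
tls://.:5553 {
    tls cert.pem key.pem ca.pem
    forward . tls://9.9.9.9 {
        tls_servername dns.quad9.net
    }
}
~~~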


@ -26,9 +26,9 @@ func TestLookupCache(t *testing.T) {
	}
	defer i.Stop()

-	// Start caching proxy CoreDNS that we want to test.
+	// Start caching forward CoreDNS that we want to test.
	corefile = `example.org:0 {
-	proxy . ` + udp + `
+	forward . ` + udp + `
	cache 10
}
`


@ -47,7 +47,7 @@ func TestLookupAutoPathErratic(t *testing.T) {
	corefile := `.:0 {
	erratic
	autopath @erratic
-	proxy . ` + proxyPath + `
+	forward . ` + proxyPath + `
	debug
}
`
@ -92,7 +92,7 @@ func TestAutoPathErraticNotLoaded(t *testing.T) {
	setupProxyTargetCoreDNS(t, func(proxyPath string) {
		corefile := `.:0 {
		autopath @erratic
-		proxy . ` + proxyPath + `
+		forward . ` + proxyPath + `
		debug
	}
	`


@ -48,7 +48,7 @@ func TestEtcdStubAndProxyLookup(t *testing.T) {
		upstream
		fallthrough
	}
-	proxy . 8.8.8.8:53
+	forward . 8.8.8.8:53
}`
	ex, udp, _, err := CoreDNSServerAndPorts(corefile)


@ -54,7 +54,7 @@ func TestZoneExternalCNAMELookupWithProxy(t *testing.T) {
	file ` + name + ` example.org {
		upstream
	}
-	proxy . 8.8.8.8 8.8.4.4
+	forward . 8.8.8.8 8.8.4.4
}
`
	i, udp, _, err := CoreDNSServerAndPorts(corefile)


@ -23,7 +23,7 @@ func TestMetricsServer(t *testing.T) {
	}
	example.com:0 {
-	proxy . 8.8.4.4:53
+	forward . 8.8.4.4:53
	prometheus localhost:0
}
`
@ -38,7 +38,7 @@ func TestMetricsRefused(t *testing.T) {
	metricName := "coredns_dns_response_rcode_count_total"
	corefile := `example.org:0 {
-	proxy . 8.8.8.8:53
+	forward . 8.8.8.8:53
	prometheus localhost:0
}
`


@ -46,7 +46,7 @@ func TestProxyThreeWay(t *testing.T) {
	// Proxying CoreDNS.
	corefileProxy := `example.org:0 {
-	proxy . ` + addr1 + " " + addr2 + ` {
+	forward . ` + addr1 + " " + addr2 + ` {
		max_fails 1
	}
}`


@ -21,7 +21,7 @@ func TestProxyToChaosServer(t *testing.T) {
	defer chaos.Stop()

	corefileProxy := `.:0 {
-	proxy . ` + udpChaos + `
+	forward . ` + udpChaos + `
}
`
	proxy, udp, _, err := CoreDNSServerAndPorts(corefileProxy)