K8s datasource middleware -- PoC for A records (#153)

* Laying down kubernetes middleware foundation

* Duplicated a bunch of code from the etcd middleware
* Duplicated code hacked to compile and load as a separate middleware

* Adding verbose build option to Makefile

* Removing stubzone and tls support

tls and stubzone support was carried over from base etcd middleware code.
Removing to simplify the kube middleware implementation. (For now.)

* Adding conf directory for sample conf files

* Removing stubzone support from query handler

* Remove upstream and proxy from k8s corefile.

Not sure that upstream or proxy makes sense for a k8s backed zone.

* Comment out use of singleflight serialization

* Removing parsing support for "upstream" directive from k8s

* Removing upstream directive parsing code

* Removing CNAME and TXT lookup implementation

* Create README.md

Brain-dump of DNS record name assembly and open work items.

* Adding notes about wildcard handling

* Adding basic k8s API client

* Fleshing out methods on k8s connector

* Remove PathPrefix from middleware init

* Removing incorrect plural

* Adding brute-force k8s service lookup functions

* Initializing k8s API connector during startup

* Hacking around to call k8s connector

* Parsing incoming domain name into serviceName and namespace

* Improving and simplifying k8s zone matching and label segmentation

* Removing unused functions carried over from etcd middleware

* Adding basic return of k8s data to DNS client

* updated debugging println statements to flag with "[debug]"
* removed code in kubernetes.go::Records that was a hold-over from etcd middleware.
* Removed some random exploratory hacking.

* Minor README.md updates

* Updating with demo instructions

* Updating README.md with CoreFile and removing completed TODO items

* Updating conf file and README to reflect DNS response cache works

* Disabling DNS response caching

* Adding debug statement on entry to Records()

* Changing port number in examples to port 53.

* Misc style and clarity changes

* Removing empty function definitions

* Adding comment to track future cleanup

* Refactoring README to follow style of other middleware

* Exposing dataobject field (typo)
Michael Richmond 2016-06-06 12:49:53 -07:00 committed by Miek Gieben
parent 446eaa957d
commit d04abdf422
14 changed files with 1560 additions and 1 deletion

.gitignore

@@ -2,3 +2,4 @@ query.log
Corefile
*.swp
coredns
conf/devk8sCorefile


@@ -1,5 +1,8 @@
#VERBOSE :=
VERBOSE := -v
all:
go build
go build $(VERBOSE)
.PHONY: docker
docker:

conf/k8sCorefile

@@ -0,0 +1,13 @@
# Serve on port 53
.:53 {
# use kubernetes middleware for domain "coredns.local"
kubernetes coredns.local {
# Use url for k8s API endpoint
endpoint http://localhost:8080
}
# Perform DNS response caching for the coredns.local zone
# Cache timeout is provided by the integer in seconds
# (This works for the kubernetes middleware.)
#cache 20 coredns.local
#cache 160 coredns.local
}


@@ -65,6 +65,7 @@ var directiveOrder = []directive{
{"file", setup.File},
{"secondary", setup.Secondary},
{"etcd", setup.Etcd},
{"kubernetes", setup.Kubernetes},
{"proxy", setup.Proxy},
}

core/setup/kubernetes.go

@@ -0,0 +1,93 @@
package setup
import (
// "crypto/tls"
// "crypto/x509"
"fmt"
// "io/ioutil"
// "net"
// "net/http"
// "time"
"github.com/miekg/coredns/middleware"
"github.com/miekg/coredns/middleware/kubernetes"
k8sc "github.com/miekg/coredns/middleware/kubernetes/k8sclient"
"github.com/miekg/coredns/middleware/proxy"
// "github.com/miekg/coredns/middleware/singleflight"
"golang.org/x/net/context"
)
const defaultK8sEndpoint = "http://localhost:8080"
// Kubernetes sets up the kubernetes middleware.
func Kubernetes(c *Controller) (middleware.Middleware, error) {
fmt.Println("controller %v", c)
// TODO: Determine if subzone support required
kubernetes, err := kubernetesParse(c)
if err != nil {
return nil, err
}
return func(next middleware.Handler) middleware.Handler {
kubernetes.Next = next
return kubernetes
}, nil
}
func kubernetesParse(c *Controller) (kubernetes.Kubernetes, error) {
/*
* TODO: Remove unused state and simplify.
* Inflight and Ctx might not be needed. Leaving in place until
* we take a pass at API caching and optimizing connector to the
* k8s API. Single flight (or limited upper-bound) for inflight
* API calls may be desirable.
*/
k8s := kubernetes.Kubernetes{
Proxy: proxy.New([]string{}),
Ctx: context.Background(),
// Inflight: &singleflight.Group{},
APIConn: nil,
}
var (
endpoints = []string{defaultK8sEndpoint}
)
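// For example, this parses a Corefile stanza like the one in conf/k8sCorefile:
//
//	kubernetes coredns.local {
//	    endpoint http://localhost:8080
//	}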
for c.Next() {
if c.Val() == "kubernetes" {
k8s.Zones = c.RemainingArgs()
if len(k8s.Zones) == 0 {
k8s.Zones = c.ServerBlockHosts
}
middleware.Zones(k8s.Zones).FullyQualify()
if c.NextBlock() {
// TODO(miek): 2 switches?
switch c.Val() {
case "endpoint":
args := c.RemainingArgs()
if len(args) == 0 {
return kubernetes.Kubernetes{}, c.ArgErr()
}
endpoints = args
k8s.APIConn = k8sc.NewK8sConnector(endpoints[0])
}
for c.Next() {
switch c.Val() {
case "endpoint":
args := c.RemainingArgs()
if len(args) == 0 {
return kubernetes.Kubernetes{}, c.ArgErr()
}
endpoints = args
}
}
}
return k8s, nil
}
fmt.Println("endpoints='%v'", endpoints)
}
return kubernetes.Kubernetes{}, nil
}


@@ -0,0 +1,284 @@
# kubernetes
`kubernetes` enables reading zone data from a kubernetes cluster. Record names
are constructed as "myservice.mynamespace.coredns.local" where:
* "myservice" is the name of the k8s service (this may include multiple DNS labels, such as "c1.myservice"),
* "mynamespace" is the k8s namespace for the service, and
* "coredns.local" is the zone configured for `kubernetes`.
## Syntax
~~~
kubernetes [zones...]
~~~
* `zones` the zones kubernetes should be authoritative for.
~~~
kubernetes [zones] {
endpoint http://localhost:8080
}
~~~
* `endpoint` the kubernetes API endpoint, defaults to http://localhost:8080
## Examples
This is the default kubernetes setup, with everything specified in full:
~~~
# Serve on port 53
.:53 {
# use kubernetes middleware for domain "coredns.local"
kubernetes coredns.local {
# Use url for k8s API endpoint
endpoint http://localhost:8080
}
# cache 160 coredns.local
}
~~~
### Basic Setup
#### Launch Kubernetes
Kubernetes is launched using the commands in the following `run_k8s.sh` script:
~~~
#!/bin/bash
# Based on instructions at: http://kubernetes.io/docs/getting-started-guides/docker/
#K8S_VERSION=$(curl -sS https://storage.googleapis.com/kubernetes-release/release/latest.txt)
K8S_VERSION="v1.2.4"
ARCH="amd64"
export K8S_VERSION
export ARCH
#DNS_ARGUMENTS="--cluster-dns=10.0.0.10 --cluster-domain=cluster.local"
DNS_ARGUMENTS=""
docker run -d \
--volume=/:/rootfs:ro \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:rw \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
--volume=/var/run:/var/run:rw \
--net=host \
--pid=host \
--privileged \
gcr.io/google_containers/hyperkube-${ARCH}:${K8S_VERSION} \
/hyperkube kubelet \
--containerized \
--hostname-override=127.0.0.1 \
--api-servers=http://localhost:8080 \
--config=/etc/kubernetes/manifests \
${DNS_ARGUMENTS} \
--allow-privileged --v=2
~~~
#### Configure kubectl and test
The kubernetes control client can be downloaded from the generic URL:
`http://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/${GOOS}/${GOARCH}/${K8S_BINARY}`
For example, the kubectl client for Linux can be downloaded using the command:
`curl -sSL "http://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kubectl"`
The following `setup_kubectl.sh` script can be stored in the same directory as
kubectl to set up kubectl to communicate with kubernetes running on the localhost:
~~~
#!/bin/bash
BASEDIR=`realpath $(dirname ${0})`
${BASEDIR}/kubectl config set-cluster test-doc --server=http://localhost:8080
${BASEDIR}/kubectl config set-context test-doc --cluster=test-doc
${BASEDIR}/kubectl config use-context test-doc
alias kubectl="${BASEDIR}/kubectl"
~~~
Verify that kubectl is working by querying for the kubernetes namespaces:
~~~
$ ./kubectl get namespaces
NAME STATUS AGE
default Active 8d
test Active 7d
~~~
#### Launch a kubernetes service and expose the service
The following commands will create a kubernetes namespace "demo",
launch an nginx service in the namespace, and expose the service on port 80:
~~~
$ ./kubectl create namespace demo
$ ./kubectl get namespace
$ ./kubectl run mynginx --namespace=demo --image=nginx
$ ./kubectl get deployment --namespace=demo
$ ./kubectl expose deployment mynginx --namespace=demo --port=80
$ ./kubectl get service --namespace=demo
~~~
#### Launch CoreDNS
Build CoreDNS and launch using the configuration file in `conf/k8sCorefile`.
This configuration file sets up CoreDNS to use the zone `coredns.local` for
the kubernetes services.
The command to launch CoreDNS is:
~~~
$ ./coredns -conf conf/k8sCorefile
~~~
In a separate terminal, a DNS query can be issued using dig:
~~~
$ dig @localhost mynginx.demo.coredns.local
; <<>> DiG 9.9.4-RedHat-9.9.4-29.el7_2.3 <<>> @localhost mynginx.demo.coredns.local
; (2 servers found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 47614
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;mynginx.demo.coredns.local. IN A
;; ANSWER SECTION:
mynginx.demo.coredns.local. 0 IN A 10.0.0.10
;; Query time: 2 msec
;; SERVER: ::1#53(::1)
;; WHEN: Thu Jun 02 11:07:18 PDT 2016
;; MSG SIZE rcvd: 71
~~~
## Implementation Notes/Ideas
### Basic Zone Mapping (implemented)
The middleware is configured with a "zone" string. For
example: "zone = coredns.local".
The Kubernetes service "myservice" running in "mynamespace" would map
to: "myservice.mynamespace.coredns.local".
The middleware should publish an A record for that service and a service record.
Initial implementation just performs the above simple mapping. Subsequent
revisions should allow different namespaces to be published under different zones.
For example:
~~~
# Serve on port 53
.:53 {
# use kubernetes middleware for domain "coredns.local"
kubernetes coredns.local {
# Use url for k8s API endpoint
endpoint http://localhost:8080
}
# Perform DNS response caching for the coredns.local zone
# Cache timeout is provided by the integer argument in seconds
# (This works for the kubernetes middleware.)
#cache 20 coredns.local
#cache 160 coredns.local
}
~~~
### Internal IP or External IP?
* Should the Corefile configuration allow control over whether the internal IP or external IP is exposed?
* If the Corefile configuration allows control over internal IP or external IP, then the config should allow users to control the precedence.
For example, consider a service "myservice" running in namespace "mynamespace" with internal IP "10.0.0.100" and external IP "1.2.3.4".
This example could be published as:
| Corefile directive | Result |
|------------------------------|---------------------|
| iporder = internal | 10.0.0.100 |
| iporder = external | 1.2.3.4 |
| iporder = external, internal | 1.2.3.4, 10.0.0.100 |
| iporder = internal, external | 10.0.0.100, 1.2.3.4 |
| _no directive_ | 10.0.0.100, 1.2.3.4 |
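As a sketch only (the `iporder` directive is a proposal and is not implemented
in this commit), such a configuration might be expressed in the Corefile as:

~~~
kubernetes coredns.local {
    endpoint http://localhost:8080
    # hypothetical directive: return the external IP first, then the internal IP
    iporder external, internal
}
~~~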
### Wildcards
Publishing DNS records for singleton services isn't very interesting. Since service
names are unique within a k8s namespace, multiple related services are commonly
run under a structured naming scheme.
For example, running multiple nginx services under the names:
| Service name |
|--------------|
| c1.nginx |
| c2.nginx |
or:
| Service name |
|--------------|
| nginx.c3 |
| nginx.c4 |
A DNS query with wildcard support for "nginx" in these examples should
return the IP addresses for all services with "nginx" in the service name.
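For illustration only (wildcard lookup is not implemented in this commit, and
the namespace `demo` is reused from the earlier example), such a query might
look like:

~~~
$ dig @localhost '*.nginx.demo.coredns.local'
~~~

and would be expected to answer with the A records of both `c1.nginx` and
`c2.nginx` in that namespace.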
TBD:
* How does this relate to the k8s load-balancer configuration?
* Do wildcards search across namespaces?
* Initial implementation assumes that a namespace maps to the first DNS label below the zone managed by the kubernetes middleware. This assumption may need to be revised.
## TODO
* Implement namespace filtering to different zones.
* Implement IP selection and ordering (internal/external).
* Implement SRV-record queries using naive lookup.
* Flatten service and namespace names to valid DNS characters. (service names
and namespace names in k8s may use uppercase and non-DNS characters. Implement
flattening to lower case and mapping of non-DNS characters to DNS characters
in a standard way.)
* Do we need to generate synthetic zone records for namespaces?
* Implement wildcard-based lookup.
* Improve lookup to reduce size of query result obtained from k8s API.
(namespace-based?, other ideas?)
* How to support label specification in Corefile to allow use of labels to
indicate zone? (Is this even useful?) For example, the following configuration
exposes all services labeled for the "staging" environment and tenant "customerB"
in the zone "customerB.stage.local":
~~~
kubernetes customerB.stage.local {
# Use url for k8s API endpoint
endpoint http://localhost:8080
label "environment" : "staging", "tenant" : "customerB"
}
~~~
* Test with CoreDNS caching. CoreDNS caching of DNS responses works via the
`cache` directive. Tested with a 20s cache timeout and A-record queries.
* DNS response caching is good, but we should also cache at the HTTP query
level. (Take a look at https://github.com/patrickmn/go-cache as a potential
expiring-cache implementation for the HTTP API queries; see the sketch below.)
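One possible shape for such an API-level cache (an illustrative sketch only,
not part of this commit; `cachedServiceList` and the 20-second TTL are made up
for the example) built on go-cache:

~~~
// Sketch: cache decoded k8s API responses so repeated lookups within the
// expiration window skip the HTTP round trip.
package k8sclient

import (
	"time"

	"github.com/patrickmn/go-cache"
)

// Responses expire after 20 seconds; expired entries are purged every minute.
var apiCache = cache.New(20*time.Second, time.Minute)

// cachedServiceList is a caching wrapper around GetServiceList.
func (c *K8sConnector) cachedServiceList() *ServiceList {
	if v, found := apiCache.Get("services"); found {
		return v.(*ServiceList)
	}
	services := c.GetServiceList()
	if services != nil {
		apiCache.Set("services", services, cache.DefaultExpiration)
	}
	return services
}
~~~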


@@ -0,0 +1,101 @@
package kubernetes
import (
"fmt"
"github.com/miekg/coredns/middleware"
"github.com/miekg/dns"
"golang.org/x/net/context"
)
func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
fmt.Println("[debug] here entering ServeDNS: ctx:%v dnsmsg:%v", ctx, r)
state := middleware.State{W: w, Req: r}
if state.QClass() != dns.ClassINET {
return dns.RcodeServerFailure, fmt.Errorf("can only deal with ClassINET")
}
// Check that query matches one of the zones served by this middleware,
// otherwise delegate to the next in the pipeline.
zone := middleware.Zones(k.Zones).Matches(state.Name())
if zone == "" {
if k.Next == nil {
return dns.RcodeServerFailure, nil
}
return k.Next.ServeDNS(ctx, w, r)
}
m := new(dns.Msg)
m.SetReply(r)
m.Authoritative, m.RecursionAvailable, m.Compress = true, true, true
var (
records, extra []dns.RR
err error
)
switch state.Type() {
case "A":
records, err = k.A(zone, state, nil)
case "AAAA":
records, err = k.AAAA(zone, state, nil)
case "TXT":
records, err = k.TXT(zone, state)
case "CNAME":
records, err = k.CNAME(zone, state)
case "MX":
records, extra, err = k.MX(zone, state)
case "SRV":
records, extra, err = k.SRV(zone, state)
case "SOA":
records = []dns.RR{k.SOA(zone, state)}
case "NS":
if state.Name() == zone {
records, extra, err = k.NS(zone, state)
break
}
fallthrough
default:
// Do a fake A lookup, so we can distinguish between NODATA and NXDOMAIN
_, err = k.A(zone, state, nil)
}
if isKubernetesNameError(err) {
return k.Err(zone, dns.RcodeNameError, state)
}
if err != nil {
return dns.RcodeServerFailure, err
}
if len(records) == 0 {
return k.Err(zone, dns.RcodeSuccess, state)
}
m.Answer = append(m.Answer, records...)
m.Extra = append(m.Extra, extra...)
m = dedup(m)
state.SizeAndDo(m)
m, _ = state.Scrub(m)
w.WriteMsg(m)
return dns.RcodeSuccess, nil
}
// Err writes an error response (NODATA or NXDOMAIN, with the zone SOA in the authority section) to the client.
func (k Kubernetes) Err(zone string, rcode int, state middleware.State) (int, error) {
m := new(dns.Msg)
m.SetRcode(state.Req, rcode)
m.Ns = []dns.RR{k.SOA(zone, state)}
state.SizeAndDo(m)
state.W.WriteMsg(m)
return rcode, nil
}
func dedup(m *dns.Msg) *dns.Msg {
// TODO(miek): expensive!
m.Answer = dns.Dedup(m.Answer, nil)
m.Ns = dns.Dedup(m.Ns, nil)
m.Extra = dns.Dedup(m.Extra, nil)
return m
}


@@ -0,0 +1,110 @@
package k8sclient
import (
"encoding/json"
"net/http"
)
func getJson(url string, target interface{}) error {
r, err := http.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(target)
}
// Kubernetes Resource List
type ResourceList struct {
Kind string `json:"kind"`
GroupVersion string `json:"groupVersion"`
Resources []resource `json:"resources"`
}
type resource struct {
Name string `json:"name"`
Namespaced bool `json:"namespaced"`
Kind string `json:"kind"`
}
// Kubernetes NamespaceList
type NamespaceList struct {
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Metadata apiListMetadata `json:"metadata"`
Items []nsItems `json:"items"`
}
type apiListMetadata struct {
SelfLink string `json:"selfLink"`
ResourceVersion string `json:"resourceVersion"`
}
type nsItems struct {
Metadata nsMetadata `json:"metadata"`
Spec nsSpec `json:"spec"`
Status nsStatus `json:"status"`
}
type nsMetadata struct {
Name string `json:"name"`
SelfLink string `json:"selfLink"`
Uid string `json:"uid"`
ResourceVersion string `json:"resourceVersion"`
CreationTimestamp string `json:"creationTimestamp"`
}
type nsSpec struct {
Finalizers []string `json:"finalizers"`
}
type nsStatus struct {
Phase string `json:"phase"`
}
// Kubernetes ServiceList
type ServiceList struct {
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Metadata apiListMetadata `json:"metadata"`
Items []ServiceItem `json:"items"`
}
type ServiceItem struct {
Metadata serviceMetadata `json:"metadata"`
Spec serviceSpec `json:"spec"`
// Status serviceStatus `json:"status"`
}
type serviceMetadata struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
SelfLink string `json:"selfLink"`
Uid string `json:"uid"`
ResourceVersion string `json:"resourceVersion"`
CreationTimestamp string `json:"creationTimestamp"`
// labels
}
type serviceSpec struct {
Ports []servicePort `json:"ports"`
ClusterIP string `json:"clusterIP"`
Type string `json:"type"`
SessionAffinity string `json:"sessionAffinity"`
}
type servicePort struct {
Name string `json:"name"`
Protocol string `json:"protocol"`
Port int `json:"port"`
TargetPort int `json:"targetPort"`
}
type serviceStatus struct {
LoadBalancer string `json:"loadBalancer"`
}


@@ -0,0 +1,117 @@
package k8sclient
import (
// "fmt"
"net/url"
)
// API strings
const (
apiBase = "/api/v1"
apiNamespaces = "/namespaces"
apiServices = "/services"
)
// Defaults
const (
defaultBaseUrl = "http://localhost:8080"
)
type K8sConnector struct {
baseUrl string
}
func (c *K8sConnector) SetBaseUrl(u string) error {
validUrl, err := url.Parse(u)
if err != nil {
return err
}
c.baseUrl = validUrl.String()
return nil
}
func (c *K8sConnector) GetBaseUrl() string {
return c.baseUrl
}
func (c *K8sConnector) GetResourceList() *ResourceList {
resources := new(ResourceList)
err := getJson(c.baseUrl+apiBase, resources)
if err != nil {
return nil
}
return resources
}
func (c *K8sConnector) GetNamespaceList() *NamespaceList {
namespaces := new(NamespaceList)
err := getJson(c.baseUrl+apiBase+apiNamespaces, namespaces)
if err != nil {
return nil
}
return namespaces
}
func (c *K8sConnector) GetServiceList() *ServiceList {
services := new(ServiceList)
err := getJson(c.baseUrl+apiBase+apiServices, services)
if err != nil {
return nil
}
return services
}
// GetServicesByNamespace returns a map of namespace name to the ServiceItems in that namespace.
func (c *K8sConnector) GetServicesByNamespace() map[string][]ServiceItem {
items := make(map[string][]ServiceItem)
k8sServiceList := c.GetServiceList()
k8sItemList := k8sServiceList.Items
for _, i := range k8sItemList {
namespace := i.Metadata.Namespace
items[namespace] = append(items[namespace], i)
}
return items
}
// GetServiceItemInNamespace returns the ServiceItem that matches servicename in the given namespace.
func (c *K8sConnector) GetServiceItemInNamespace(namespace string, servicename string) *ServiceItem {
itemMap := c.GetServicesByNamespace()
// TODO: Handle case where the namespace is not found
for _, x := range itemMap[namespace] {
if x.Metadata.Name == servicename {
return &x
}
}
// No matching item found in namespace
return nil
}
func NewK8sConnector(baseurl string) *K8sConnector {
k := new(K8sConnector)
k.SetBaseUrl(baseurl)
return k
}


@@ -0,0 +1,223 @@
// Package kubernetes provides the kubernetes backend.
package kubernetes
import (
"fmt"
"strings"
"time"
"github.com/miekg/coredns/middleware"
"github.com/miekg/coredns/middleware/kubernetes/msg"
k8sc "github.com/miekg/coredns/middleware/kubernetes/k8sclient"
"github.com/miekg/coredns/middleware/proxy"
// "github.com/miekg/coredns/middleware/singleflight"
"github.com/miekg/dns"
"golang.org/x/net/context"
)
type Kubernetes struct {
Next middleware.Handler
Zones []string
Proxy proxy.Proxy // Proxy for looking up names during the resolution process
Ctx context.Context
// Inflight *singleflight.Group
APIConn *k8sc.K8sConnector
}
func (g Kubernetes) getZoneForName(name string) (string, []string) {
/*
* getZoneForName returns the zone string that matches the name and a
* list of the DNS labels from name that are within the zone.
* For example, if "coredns.local" is a zone configured for the
* Kubernetes middleware, then getZoneForName("a.b.coredns.local")
* will return ("coredns.local", ["a", "b"]).
*/
var zone string
var serviceSegments []string
for _, z := range g.Zones {
if dns.IsSubDomain(z, name) {
zone = z
serviceSegments = dns.SplitDomainName(name)
serviceSegments = serviceSegments[:len(serviceSegments) - dns.CountLabel(zone)]
break
}
}
return zone, serviceSegments
}
// Records looks up services in kubernetes.
// If exact is true, it will look up just this name. This is used to find
// matches when completing SRV lookups, for instance.
func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
fmt.Println("enter Records('", name, "', ", exact, ")")
zone, serviceSegments := g.getZoneForName(name)
var serviceName string
var namespace string
// For initial implementation, assume namespace is first serviceSegment
// and service name is remaining segments.
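// For example, "mynginx.demo.coredns.local." in the zone "coredns.local."
// yields serviceSegments ["mynginx", "demo"], so namespace is "demo" and
// serviceName is "mynginx".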
serviceSegLen := len(serviceSegments)
if serviceSegLen >= 2 {
namespace = serviceSegments[serviceSegLen-1]
serviceName = strings.Join(serviceSegments[:serviceSegLen-1], ".")
}
// else we are looking up the zone. So handle the NS, SOA records etc.
fmt.Println("[debug] zone: ", zone)
fmt.Println("[debug] servicename: ", serviceName)
fmt.Println("[debug] namespace: ", namespace)
fmt.Println("[debug] APIconn: ", g.APIConn)
k8sItem := g.APIConn.GetServiceItemInNamespace(namespace, serviceName)
fmt.Println("[debug] k8s item:", k8sItem)
switch {
case exact && k8sItem == nil:
fmt.Println("here2")
return nil, nil
}
if k8sItem == nil {
// Did not find item in k8s
return nil, nil
}
fmt.Println("[debug] clusterIP:", k8sItem.Spec.ClusterIP)
for _, p := range k8sItem.Spec.Ports {
fmt.Println("[debug] host:", name)
fmt.Println("[debug] port:", p.Port)
}
clusterIP := k8sItem.Spec.ClusterIP
var records []msg.Service
for _, p := range k8sItem.Spec.Ports{
s := msg.Service{Host: clusterIP, Port: p.Port}
records = append(records, s)
}
return records, nil
}
/*
// Get performs the call to the Kubernetes http API.
func (g Kubernetes) Get(path string, recursive bool) (bool, error) {
fmt.Println("[debug] in Get path: ", path)
fmt.Println("[debug] in Get recursive: ", recursive)
return false, nil
}
*/
func (g Kubernetes) splitDNSName(name string) []string {
l := dns.SplitDomainName(name)
for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
return l
}
// skydns/local/skydns/east/staging/web
// skydns/local/skydns/west/production/web
//
// skydns/local/skydns/*/*/web
// skydns/local/skydns/*/web
// loopNodes recursively loops through the nodes and returns all the values. The nodes' keyname
// will be match against any wildcards when star is true.
/*
func (g Kubernetes) loopNodes(ns []*etcdc.Node, nameParts []string, star bool, bx map[msg.Service]bool) (sx []msg.Service, err error) {
if bx == nil {
bx = make(map[msg.Service]bool)
}
Nodes:
for _, n := range ns {
if n.Dir {
nodes, err := g.loopNodes(n.Nodes, nameParts, star, bx)
if err != nil {
return nil, err
}
sx = append(sx, nodes...)
continue
}
if star {
keyParts := strings.Split(n.Key, "/")
for i, n := range nameParts {
if i > len(keyParts)-1 {
// name is longer than key
continue Nodes
}
if n == "*" || n == "any" {
continue
}
if keyParts[i] != n {
continue Nodes
}
}
}
serv := new(msg.Service)
if err := json.Unmarshal([]byte(n.Value), serv); err != nil {
return nil, err
}
b := msg.Service{Host: serv.Host, Port: serv.Port, Priority: serv.Priority, Weight: serv.Weight, Text: serv.Text, Key: n.Key}
if _, ok := bx[b]; ok {
continue
}
bx[b] = true
serv.Key = n.Key
serv.Ttl = g.Ttl(n, serv)
if serv.Priority == 0 {
serv.Priority = priority
}
sx = append(sx, *serv)
}
return sx, nil
}
// Ttl returns the smaller of the kubernetes TTL and the service's
// TTL. If neither of these are set (have a zero value), a default is used.
func (g Kubernetes) Ttl(node *etcdc.Node, serv *msg.Service) uint32 {
kubernetesTtl := uint32(node.TTL)
if kubernetesTtl == 0 && serv.Ttl == 0 {
return ttl
}
if kubernetesTtl == 0 {
return serv.Ttl
}
if serv.Ttl == 0 {
return kubernetesTtl
}
if kubernetesTtl < serv.Ttl {
return kubernetesTtl
}
return serv.Ttl
}
*/
// isKubernetesNameError checks if the error indicates that the name was not found in kubernetes.
func isKubernetesNameError(err error) bool {
return false
}
const (
priority = 10 // default priority when nothing is set
ttl = 300 // default ttl when nothing is set
minTtl = 60
hostmaster = "hostmaster"
k8sTimeout = 5 * time.Second
)


@@ -0,0 +1,305 @@
package kubernetes
import (
"fmt"
"math"
"net"
"time"
"github.com/miekg/coredns/middleware"
"github.com/miekg/coredns/middleware/kubernetes/msg"
"github.com/miekg/dns"
)
func (k Kubernetes) records(state middleware.State, exact bool) ([]msg.Service, error) {
services, err := k.Records(state.Name(), exact)
if err != nil {
return nil, err
}
// TODO: Do we want to support the SkyDNS (hacky) Group feature?
services = msg.Group(services)
return services, nil
}
func (k Kubernetes) A(zone string, state middleware.State, previousRecords []dns.RR) (records []dns.RR, err error) {
services, err := k.records(state, false)
if err != nil {
return nil, err
}
for _, serv := range services {
ip := net.ParseIP(serv.Host)
switch {
case ip == nil:
// TODO(miek): lowercasing? Should lowercase in everything see #85
if middleware.Name(state.Name()).Matches(dns.Fqdn(serv.Host)) {
// x CNAME x is a direct loop, don't add those
continue
}
newRecord := serv.NewCNAME(state.QName(), serv.Host)
if len(previousRecords) > 7 {
// don't add it, and just continue
continue
}
if isDuplicateCNAME(newRecord, previousRecords) {
continue
}
state1 := copyState(state, serv.Host, state.QType())
nextRecords, err := k.A(zone, state1, append(previousRecords, newRecord))
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
}
continue
}
// This means we cannot complete the CNAME; try to look elsewhere.
target := newRecord.Target
if dns.IsSubDomain(zone, target) {
// We should already have found it
continue
}
m1, e1 := k.Proxy.Lookup(state, target, state.QType())
if e1 != nil {
continue
}
// len(m1.Answer) > 0 here as well?
records = append(records, newRecord)
records = append(records, m1.Answer...)
continue
case ip.To4() != nil:
records = append(records, serv.NewA(state.QName(), ip.To4()))
case ip.To4() == nil:
// nodata?
}
}
return records, nil
}
func (k Kubernetes) AAAA(zone string, state middleware.State, previousRecords []dns.RR) (records []dns.RR, err error) {
services, err := k.records(state, false)
if err != nil {
return nil, err
}
for _, serv := range services {
ip := net.ParseIP(serv.Host)
switch {
case ip == nil:
// Try to resolve as CNAME if it's not an IP, but only if we don't create loops.
if middleware.Name(state.Name()).Matches(dns.Fqdn(serv.Host)) {
// x CNAME x is a direct loop, don't add those
continue
}
newRecord := serv.NewCNAME(state.QName(), serv.Host)
if len(previousRecords) > 7 {
// don't add it, and just continue
continue
}
if isDuplicateCNAME(newRecord, previousRecords) {
continue
}
state1 := copyState(state, serv.Host, state.QType())
nextRecords, err := k.AAAA(zone, state1, append(previousRecords, newRecord))
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
}
continue
}
// This means we cannot complete the CNAME; try to look elsewhere.
target := newRecord.Target
if dns.IsSubDomain(zone, target) {
// We should already have found it
continue
}
m1, e1 := k.Proxy.Lookup(state, target, state.QType())
if e1 != nil {
continue
}
// len(m1.Answer) > 0 here as well?
records = append(records, newRecord)
records = append(records, m1.Answer...)
continue
// both here again
case ip.To4() != nil:
// nada?
case ip.To4() == nil:
records = append(records, serv.NewAAAA(state.QName(), ip.To16()))
}
}
return records, nil
}
// SRV returns SRV records from kubernetes.
// If the Target is not a name but an IP address, a name is created on the fly.
func (k Kubernetes) SRV(zone string, state middleware.State) (records []dns.RR, extra []dns.RR, err error) {
services, err := k.records(state, false)
if err != nil {
return nil, nil, err
}
// Looping twice to get the right weight vs priority
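// First pass: sum the weights per priority (an unset weight counts as 100).
// Second pass: rescale each weight so that the weights within one priority
// add up to roughly 100.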
w := make(map[int]int)
for _, serv := range services {
weight := 100
if serv.Weight != 0 {
weight = serv.Weight
}
if _, ok := w[serv.Priority]; !ok {
w[serv.Priority] = weight
continue
}
w[serv.Priority] += weight
}
lookup := make(map[string]bool)
for _, serv := range services {
w1 := 100.0 / float64(w[serv.Priority])
if serv.Weight == 0 {
w1 *= 100
} else {
w1 *= float64(serv.Weight)
}
weight := uint16(math.Floor(w1))
ip := net.ParseIP(serv.Host)
switch {
case ip == nil:
srv := serv.NewSRV(state.QName(), weight)
records = append(records, srv)
if _, ok := lookup[srv.Target]; ok {
break
}
lookup[srv.Target] = true
if !dns.IsSubDomain(zone, srv.Target) {
m1, e1 := k.Proxy.Lookup(state, srv.Target, dns.TypeA)
if e1 == nil {
extra = append(extra, m1.Answer...)
}
m1, e1 = k.Proxy.Lookup(state, srv.Target, dns.TypeAAAA)
if e1 == nil {
// If we have seen CNAME's we *assume* that they are already added.
for _, a := range m1.Answer {
if _, ok := a.(*dns.CNAME); !ok {
extra = append(extra, a)
}
}
}
break
}
// Internal name, we should have some info on them, either v4 or v6
// Clients expect a complete answer, because we are a recursor in their view.
state1 := copyState(state, srv.Target, dns.TypeA)
addr, e1 := k.A(zone, state1, nil)
if e1 == nil {
extra = append(extra, addr...)
}
// k.AAAA(zone, state1, nil) as well...?
case ip.To4() != nil:
serv.Host = k.Domain(serv.Key)
srv := serv.NewSRV(state.QName(), weight)
records = append(records, srv)
extra = append(extra, serv.NewA(srv.Target, ip.To4()))
case ip.To4() == nil:
serv.Host = k.Domain(serv.Key)
srv := serv.NewSRV(state.QName(), weight)
records = append(records, srv)
extra = append(extra, serv.NewAAAA(srv.Target, ip.To16()))
}
}
return records, extra, nil
}
// Returning MX records from kubernetes not implemented.
func (k Kubernetes) MX(zone string, state middleware.State) (records []dns.RR, extra []dns.RR, err error) {
return nil, nil, err
}
// Returning CNAME records from kubernetes not implemented.
func (k Kubernetes) CNAME(zone string, state middleware.State) (records []dns.RR, err error) {
return nil, err
}
// Returning TXT records from kubernetes not implemented.
func (k Kubernetes) TXT(zone string, state middleware.State) (records []dns.RR, err error) {
return nil, err
}
func (k Kubernetes) NS(zone string, state middleware.State) (records, extra []dns.RR, err error) {
// NS records for this zone live in a special place, ns.dns.<zone>. Fake our lookup.
// only a tad bit fishy...
old := state.QName()
state.Clear()
state.Req.Question[0].Name = "ns.dns." + zone
services, err := k.records(state, false)
if err != nil {
return nil, nil, err
}
// ... and reset
state.Req.Question[0].Name = old
for _, serv := range services {
ip := net.ParseIP(serv.Host)
switch {
case ip == nil:
return nil, nil, fmt.Errorf("NS record must be an IP address: %s", serv.Host)
case ip.To4() != nil:
serv.Host = k.Domain(serv.Key)
records = append(records, serv.NewNS(state.QName()))
extra = append(extra, serv.NewA(serv.Host, ip.To4()))
case ip.To4() == nil:
serv.Host = k.Domain(serv.Key)
records = append(records, serv.NewNS(state.QName()))
extra = append(extra, serv.NewAAAA(serv.Host, ip.To16()))
}
}
return records, extra, nil
}
// SOA Record returns a SOA record.
func (k Kubernetes) SOA(zone string, state middleware.State) *dns.SOA {
header := dns.RR_Header{Name: zone, Rrtype: dns.TypeSOA, Ttl: 300, Class: dns.ClassINET}
return &dns.SOA{Hdr: header,
Mbox: "hostmaster." + zone,
Ns: "ns.dns." + zone,
Serial: uint32(time.Now().Unix()),
Refresh: 7200,
Retry: 1800,
Expire: 86400,
Minttl: 60,
}
}
// TODO(miek): DNSKEY and friends... intercepted by the DNSSEC middleware?
func isDuplicateCNAME(r *dns.CNAME, records []dns.RR) bool {
for _, rec := range records {
if v, ok := rec.(*dns.CNAME); ok {
if v.Target == r.Target {
return true
}
}
}
return false
}
func copyState(state middleware.State, target string, typ uint16) middleware.State {
state1 := middleware.State{W: state.W, Req: state.Req.Copy()}
state1.Req.Question[0] = dns.Question{dns.Fqdn(target), dns.ClassINET, typ}
return state1
}


@@ -0,0 +1,166 @@
package msg
import (
"net"
"strings"
"github.com/miekg/dns"
)
// This *is* the rdata from a SRV record, but with a twist.
// Host (Target in SRV) must be a domain name, but if it looks like an IP
// address (4/6), we will treat it like an IP address.
type Service struct {
Host string `json:"host,omitempty"`
Port int `json:"port,omitempty"`
Priority int `json:"priority,omitempty"`
Weight int `json:"weight,omitempty"`
Text string `json:"text,omitempty"`
Mail bool `json:"mail,omitempty"` // Be an MX record. Priority becomes Preference.
Ttl uint32 `json:"ttl,omitempty"`
// When a SRV record with a "Host: IP-address" is added, we synthesize
// a srv.Target domain name. Normally we convert the full Key where
// the record lives to a DNS name and use this as the srv.Target. When
// TargetStrip > 0 we strip the left most TargetStrip labels from the
// DNS name.
TargetStrip int `json:"targetstrip,omitempty"`
// Group is used to group (or *not* to group) different services
// together. Services with an identical Group are returned in the same
// answer.
Group string `json:"group,omitempty"`
// Etcd key where we found this service; ignored for JSON un-/marshalling
Key string `json:"-"`
}
// NewSRV returns a new SRV record based on the Service.
func (s *Service) NewSRV(name string, weight uint16) *dns.SRV {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
return &dns.SRV{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: s.Ttl},
Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: dns.Fqdn(host)}
}
// NewMX returns a new MX record based on the Service.
func (s *Service) NewMX(name string) *dns.MX {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
return &dns.MX{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: s.Ttl},
Preference: uint16(s.Priority), Mx: host}
}
// NewA returns a new A record based on the Service.
func (s *Service) NewA(name string, ip net.IP) *dns.A {
return &dns.A{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: s.Ttl}, A: ip}
}
// NewAAAA returns a new AAAA record based on the Service.
func (s *Service) NewAAAA(name string, ip net.IP) *dns.AAAA {
return &dns.AAAA{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: s.Ttl}, AAAA: ip}
}
// NewCNAME returns a new CNAME record based on the Service.
func (s *Service) NewCNAME(name string, target string) *dns.CNAME {
return &dns.CNAME{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: s.Ttl}, Target: dns.Fqdn(target)}
}
// NewTXT returns a new TXT record based on the Service.
func (s *Service) NewTXT(name string) *dns.TXT {
return &dns.TXT{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: s.Ttl}, Txt: split255(s.Text)}
}
// NewNS returns a new NS record based on the Service.
func (s *Service) NewNS(name string) *dns.NS {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
return &dns.NS{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: s.Ttl}, Ns: host}
}
// Group checks the services in sx, it looks for a Group attribute on the shortest
// keys. If there are multiple shortest keys *and* the group attribute disagrees (and
// is not empty), we don't consider it a group.
// If a group is found, only services with *that* group (or no group) will be returned.
func Group(sx []Service) []Service {
if len(sx) == 0 {
return sx
}
// Shortest key with group attribute sets the group for this set.
group := sx[0].Group
slashes := strings.Count(sx[0].Key, "/")
length := make([]int, len(sx))
for i, s := range sx {
x := strings.Count(s.Key, "/")
length[i] = x
if x < slashes {
if s.Group == "" {
break
}
slashes = x
group = s.Group
}
}
if group == "" {
return sx
}
ret := []Service{} // with slice-tricks in sx we can prolly save this allocation (TODO)
for i, s := range sx {
if s.Group == "" {
ret = append(ret, s)
continue
}
// Disagreement on the same level
if length[i] == slashes && s.Group != group {
return sx
}
if s.Group == group {
ret = append(ret, s)
}
}
return ret
}
// split255 splits a string into 255-byte chunks.
func split255(s string) []string {
if len(s) < 255 {
return []string{s}
}
sx := []string{}
p, i := 0, 255
for {
if i <= len(s) {
sx = append(sx, s[p:i])
} else {
sx = append(sx, s[p:])
break
}
p, i = p+255, i+255
}
return sx
}
// targetStrip strips "targetstrip" labels from the left side of the fully qualified name.
func targetStrip(name string, targetStrip int) string {
if targetStrip == 0 {
return name
}
offset, end := 0, false
for i := 0; i < targetStrip; i++ {
offset, end = dns.NextLabel(name, offset)
}
if end {
// We overshot the name, use the original one.
offset = 0
}
name = name[offset:]
return name
}


@@ -0,0 +1,125 @@
package msg
import "testing"
func TestSplit255(t *testing.T) {
xs := split255("abc")
if len(xs) != 1 && xs[0] != "abc" {
t.Errorf("Failure to split abc")
}
s := ""
for i := 0; i < 255; i++ {
s += "a"
}
xs = split255(s)
if len(xs) != 1 && xs[0] != s {
t.Errorf("failure to split 255 char long string")
}
s += "b"
xs = split255(s)
if len(xs) != 2 || xs[1] != "b" {
t.Errorf("failure to split 256 char long string: %d", len(xs))
}
for i := 0; i < 255; i++ {
s += "a"
}
xs = split255(s)
if len(xs) != 3 || xs[2] != "a" {
t.Errorf("failure to split 510 char long string: %d", len(xs))
}
}
func TestGroup(t *testing.T) {
// Keys are in the wrong order, but for this test it does not matter.
sx := Group(
[]Service{
{Host: "127.0.0.1", Group: "g1", Key: "b/sub/dom1/skydns/test"},
{Host: "127.0.0.2", Group: "g2", Key: "a/dom1/skydns/test"},
},
)
// Expecting to return the shortest key with a Group attribute.
if len(sx) != 1 {
t.Fatalf("failure to group zeroth set: %v", sx)
}
if sx[0].Key != "a/dom1/skydns/test" {
t.Fatalf("failure to group zeroth set: %v, wrong Key", sx)
}
// Groups disagree, so we will not do anything.
sx = Group(
[]Service{
{Host: "server1", Group: "g1", Key: "region1/skydns/test"},
{Host: "server2", Group: "g2", Key: "region1/skydns/test"},
},
)
if len(sx) != 2 {
t.Fatalf("failure to group first set: %v", sx)
}
// Group is g1, include only the top-level one.
sx = Group(
[]Service{
{Host: "server1", Group: "g1", Key: "a/dom/region1/skydns/test"},
{Host: "server2", Group: "g2", Key: "a/subdom/dom/region1/skydns/test"},
},
)
if len(sx) != 1 {
t.Fatalf("failure to group second set: %v", sx)
}
// Groupless services must be included.
sx = Group(
[]Service{
{Host: "server1", Group: "g1", Key: "a/dom/region1/skydns/test"},
{Host: "server2", Group: "g2", Key: "a/subdom/dom/region1/skydns/test"},
{Host: "server2", Group: "", Key: "b/subdom/dom/region1/skydns/test"},
},
)
if len(sx) != 2 {
t.Fatalf("failure to group third set: %v", sx)
}
// Empty group on the highest level: include that one also.
sx = Group(
[]Service{
{Host: "server1", Group: "g1", Key: "a/dom/region1/skydns/test"},
{Host: "server1", Group: "", Key: "b/dom/region1/skydns/test"},
{Host: "server2", Group: "g2", Key: "a/subdom/dom/region1/skydns/test"},
},
)
if len(sx) != 2 {
t.Fatalf("failure to group fourth set: %v", sx)
}
// Empty group on the highest level: include that one also, and the rest.
sx = Group(
[]Service{
{Host: "server1", Group: "g5", Key: "a/dom/region1/skydns/test"},
{Host: "server1", Group: "", Key: "b/dom/region1/skydns/test"},
{Host: "server2", Group: "g5", Key: "a/subdom/dom/region1/skydns/test"},
},
)
if len(sx) != 3 {
t.Fatalf("failure to group fith set: %v", sx)
}
// One group.
sx = Group(
[]Service{
{Host: "server1", Group: "g6", Key: "a/dom/region1/skydns/test"},
},
)
if len(sx) != 1 {
t.Fatalf("failure to group sixth set: %v", sx)
}
// No group, one service
sx = Group(
[]Service{
{Host: "server1", Key: "a/dom/region1/skydns/test"},
},
)
if len(sx) != 1 {
t.Fatalf("failure to group seventh set: %v", sx)
}
}


@@ -0,0 +1,17 @@
package kubernetes
import (
"strings"
"github.com/miekg/dns"
)
// Domain is the opposite of Path.
func (k Kubernetes) Domain(s string) string {
l := strings.Split(s, "/")
// start with 1, to strip /skydns
for i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
return dns.Fqdn(strings.Join(l[1:len(l)-1], "."))
}