K8s Test Cleanup and Service PTR

Simplify the CI setup for K8s: it now just creates a set of test
objects from a single YAML file, making the tests easy to modify.

Implement PTR records for services.
John Belamaric 2016-11-14 19:31:08 +00:00
parent 137fc33b8f
commit afe4368c34
10 changed files with 248 additions and 314 deletions


@@ -30,7 +30,7 @@ before_install:
 before_script:
 - docker run -d --net=host --name=etcd quay.io/coreos/etcd:v$ETCD_VERSION
-- docker run -d --volume=/:/rootfs:ro --volume=/sys:/sys:ro --volume=/var/lib/docker/:/var/lib/docker:rw --volume=/var/lib/kubelet/:/var/lib/kubelet:rw --volume=/var/run:/var/run:rw --net=host --pid=host --privileged --name=hyperkube gcr.io/google_containers/hyperkube-amd64:v$K8S_VERSION /hyperkube kubelet --containerized --hostname-override=127.0.0.1 --api-servers=http://localhost:8080 --config=/etc/kubernetes/manifests $DNS_ARGUMENTS --allow-privileged --v=2
+- docker run -d --volume=/:/rootfs:ro --volume=/sys:/sys:ro --volume=/var/lib/docker/:/var/lib/docker:rw --volume=/var/lib/kubelet/:/var/lib/kubelet:rw --volume=/var/run:/var/run:rw --volume=`pwd`/.travis:/travis --net=host --pid=host --privileged --name=hyperkube gcr.io/google_containers/hyperkube-amd64:v$K8S_VERSION /hyperkube kubelet --containerized --hostname-override=127.0.0.1 --api-servers=http://localhost:8080 --config=/etc/kubernetes/manifests $DNS_ARGUMENTS --allow-privileged --v=2
 # Wait until kubectl is ready
 - for i in {1..10}; do $KUBECTL version && break || sleep 5; done
 - $KUBECTL version
@@ -39,7 +39,7 @@ before_script:
 - $KUBECTL config use-context test-doc
 # Wait until k8s is ready
 - for i in {1..30}; do $KUBECTL get nodes && break || sleep 5; done
-- .travis/kubernetes/setup_k8s_services.sh
+- $KUBECTL create -f /travis/kubernetes/dns-test.yaml
 - docker ps -a
 script:


@@ -1,28 +0,0 @@
## Test scripts to automate kubernetes startup

Requirements:
    docker

The scripts in this directory start up kubernetes with docker as the container
runtime. After starting kubernetes, a couple of kubernetes services are started
to allow automatic testing of CoreDNS with kubernetes. The kubernetes
integration tests in `test/kubernetes_test.go` depend on having some sample
services running. The scripts in this folder automate the launch of kubernetes
and the creation of the expected sample services.

To start up kubernetes and launch some sample services, run the script
`setup_k8s_services.sh`.

~~~
$ ./setup_k8s_services.sh
~~~

After running the above script, kubernetes will be running on the localhost
with the following services exposed:

~~~
NAMESPACE   NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     kubernetes   10.0.0.1     <none>        443/TCP   48m
demo        mynginx      10.0.0.168   <none>        80/TCP    9m
demo        webserver    10.0.0.28    <none>        80/TCP    2m
test        mynginx      10.0.0.4     <none>        80/TCP    2m
test        webserver    10.0.0.39    <none>        80/TCP    2m
~~~


@@ -0,0 +1,151 @@
apiVersion: v1
kind: Namespace
metadata:
  name: test-1
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-2
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: de-1-a
  namespace: test-1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: app-1-a
    spec:
      containers:
      - name: app-1-a-c
        image: gcr.io/google_containers/pause-amd64:3.0
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: de-1-b
  namespace: test-1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: app-1-b
    spec:
      containers:
      - name: app-1-b-c
        image: gcr.io/google_containers/pause-amd64:3.0
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: de-c
  namespace: test-1
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: app-c
    spec:
      containers:
      - name: app-c-c
        image: gcr.io/google_containers/pause-amd64:3.0
        ports:
        - containerPort: 1234
          name: c-port
          protocol: UDP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: de-c
  namespace: test-2
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: app-c
    spec:
      containers:
      - name: app-c-c
        image: gcr.io/google_containers/pause-amd64:3.0
        ports:
        - containerPort: 1234
          name: c-port
          protocol: UDP
---
apiVersion: v1
kind: Service
metadata:
  name: svc-1-a
  namespace: test-1
spec:
  selector:
    app: app-1-a
  clusterIP: 10.3.0.100
  ports:
  - name: http
    port: 80
    protocol: TCP
  - name: https
    port: 443
    protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: svc-1-b
  namespace: test-1
spec:
  selector:
    app: app-1-b
  clusterIP: 10.3.0.110
  ports:
  - name: http
    port: 80
    protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: svc-c
  namespace: test-1
spec:
  selector:
    app: app-c
  clusterIP: 10.3.0.115
  ports:
  - name: c-port
    port: 1234
    protocol: UDP
---
apiVersion: v1
kind: Service
metadata:
  name: svc-c
  namespace: test-2
spec:
  selector:
    app: app-c
  clusterIP: 10.3.0.120
  ports:
  - name: c-port
    port: 1234
    protocol: UDP
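
With these objects created, the integration tests can exercise both forward and reverse lookups against a fixed set of cluster IPs. As a rough illustration (not part of this commit), the kind of queries the new test cases make could be reproduced with miekg/dns; the listen address 127.0.0.1:53 is an assumption, the real test discovers the address of the server it starts:

~~~
// ptr_query_sketch.go: a minimal sketch showing the forward and reverse
// queries the new test cases perform against the objects above.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	const server = "127.0.0.1:53" // assumed address of the CoreDNS under test

	c := new(dns.Client)

	// Forward lookup: service svc-1-a in namespace test-1 should yield 10.3.0.100.
	m := new(dns.Msg)
	m.SetQuestion("svc-1-a.test-1.svc.cluster.local.", dns.TypeA)
	r, _, err := c.Exchange(m, server)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("A answer:", r.Answer)

	// Reverse lookup: the PTR for 10.3.0.100 should name the same service.
	rev, err := dns.ReverseAddr("10.3.0.100") // "100.0.3.10.in-addr.arpa."
	if err != nil {
		log.Fatal(err)
	}
	m = new(dns.Msg)
	m.SetQuestion(rev, dns.TypePTR)
	r, _, err = c.Exchange(m, server)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("PTR answer:", r.Answer)
}
~~~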


@@ -1,70 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-configmap
  namespace: kube-system
data:
  corefile: |
    .:53 {
        kubernetes coredns.local {
        }
        #cache 160 coredns.local
        errors stdout
        log stdout
    }
---
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    version: v20
  name: kube-dns-v20
  namespace: kube-system
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
    version: v20
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        version: v20
    spec:
      containers:
      - args:
        - -conf=/cfg/corefile
        image: aledbf/kube-coredns:0.6
        imagePullPolicy: IfNotPresent
        name: kube-dns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        volumeMounts:
        - name: config-volume
          mountPath: /cfg
      - args:
        - -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null
        - -port=8080
        image: gcr.io/google_containers/exechealthz:1.0
        imagePullPolicy: IfNotPresent
        name: healthz
        ports:
        - containerPort: 8080
          protocol: TCP
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns-configmap


@@ -1,19 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-controller
  namespace: poddemo
spec:
  replicas: 2
  selector:
    role: load-balancer
  template:
    metadata:
      labels:
        role: load-balancer
    spec:
      containers:
      - name: nginx
        image: coreos/nginx
        ports:
        - containerPort: 80


@@ -1,52 +0,0 @@
#!/bin/bash

# Running skydns based on instructions at: https://testdatamanagement.wordpress.com/2015/09/01/running-kubernetes-in-docker-with-dns-on-a-single-node/

PWD=`pwd`
BASEDIR=`readlink -e $(dirname ${0})`
cd ${BASEDIR}

KUBECTL='docker exec hyperkube /hyperkube kubectl'

#RUN_SKYDNS="yes"
RUN_SKYDNS="no"

# DNS_ARGUMENTS needs to be passed when Kubernetes is setup.
if [ "${RUN_SKYDNS}" = "yes" ]; then
  DNS_ARGUMENTS="--cluster-dns=10.0.0.10 --cluster-domain=cluster.local"
else
  DNS_ARGUMENTS=""
fi

wait_until_k8s_ready() {
  # Wait until kubernetes is up and fully responsive
  while :
  do
    ${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
    if [ "${?}" = "0" ]; then
      break
    else
      echo "sleeping for 5 seconds (waiting for kubernetes to start)"
      sleep 5
    fi
  done
  echo "kubernetes nodes:"
  ${KUBECTL} get nodes
}

if [ "${RUN_SKYDNS}" = "yes" ]; then
  wait_until_k8s_ready
  echo "Launch kube2sky..."
  docker run -d --net=host gcr.io/google_containers/kube2sky:1.11 --kube_master_url=http://127.0.0.1:8080 --domain=cluster.local
  echo ""
  echo "Launch SkyDNS..."
  docker run -d --net=host gcr.io/google_containers/skydns:2015-03-11-001 --machines=http://localhost:4001 --addr=0.0.0.0:53 --domain=cluster.local
else
  true
fi

cd ${PWD}


@@ -1,99 +0,0 @@
#!/bin/bash

set -x

KUBECTL='docker exec hyperkube /hyperkube kubectl'

PWD=`pwd`
cd `readlink -e $(dirname ${0})`

create_namespaces() {
  for n in ${NAMESPACES};
  do
    echo "Creating namespace: ${n}"
    ${KUBECTL} get namespaces --no-headers 2>/dev/null | grep -q ${n}
    if [ "${?}" != "0" ]; then
      ${KUBECTL} create namespace ${n}
    fi
  done

  echo "kubernetes namespaces:"
  ${KUBECTL} get namespaces
}

# run_and_expose_service <servicename> <namespace> <image> <port>
run_and_expose_service() {
  if [ "${#}" != "4" ]; then
    return -1
  fi

  service="${1}"
  namespace="${2}"
  image="${3}"
  port="${4}"

  echo "  starting service '${service}' in namespace '${namespace}'"
  ${KUBECTL} get deployment --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${service}
  if [ "${?}" != "0" ]; then
    ${KUBECTL} run ${service} --namespace=${namespace} --image=${image}
  else
    echo "warn: service '${service}' already running in namespace '${namespace}'"
  fi

  ${KUBECTL} get service --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${service}
  if [ "${?}" != "0" ]; then
    ${KUBECTL} expose deployment ${service} --namespace=${namespace} --port=${port}
  else
    echo "warn: service '${service}' already exposed in namespace '${namespace}'"
  fi
}

# run_and_expose_rc nginx-controller nginx-rc.yml poddemo 80
run_and_expose_rc() {
  if [ "${#}" != "4" ]; then
    return -1
  fi

  rc_name="${1}"
  rc_file="${2}"
  namespace="${3}"
  port="${4}"

  echo "  starting replication controller '${rc_name}' from '${rc_file}' in namespace '${namespace}'"
  ${KUBECTL} get rc --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${rc_name}
  if [ "${?}" != "0" ]; then
    ${KUBECTL} expose -f ${rc_file} --namespace=${namespace} --port=${port}
  else
    echo "warn: rc '${rc_name}' already running in namespace '${namespace}'"
  fi
}

echo "Starting sample kubernetes services..."

NAMESPACES="demo poddemo test"
create_namespaces

echo ""
echo "Starting services:"
run_and_expose_service mynginx demo nginx 80
run_and_expose_service webserver demo nginx 80
run_and_expose_service mynginx test nginx 80
run_and_expose_service webserver test nginx 80

echo ""
echo "Services exposed:"
${KUBECTL} get services --all-namespaces

echo ""
echo "Starting replicationcontrollers:"
run_and_expose_rc nginx-controller nginx-rc.yml poddemo 80

echo ""
echo "ReplicationControllers exposed:"
${KUBECTL} get rc --all-namespaces

cd ${PWD}


@@ -31,6 +31,7 @@ import (
 type Kubernetes struct {
     Next  middleware.Handler
     Zones []string
+    primaryZone int
     Proxy proxy.Proxy // Proxy for looking up names during the resolution process
     APIEndpoint string
     APICertAuth string
@@ -53,6 +54,11 @@ func (k *Kubernetes) Services(state request.Request, exact bool, opt middleware.
     return s, nil, e // Haven't implemented debug queries yet.
 }
 
+// PrimaryZone will return the first non-reverse zone being handled by this middleware
+func (k *Kubernetes) PrimaryZone() (string) {
+    return k.Zones[k.primaryZone]
+}
+
 // Reverse implements the ServiceBackend interface.
 func (k *Kubernetes) Reverse(state request.Request, exact bool, opt middleware.Options) ([]msg.Service, []msg.Service, error) {
     ip := dnsutil.ExtractAddressFromReverse(state.Name())
@@ -286,7 +292,8 @@ func (k *Kubernetes) getServiceRecordForIP(ip, name string) []msg.Service {
     }
     for _, service := range svcList {
         if service.Spec.ClusterIP == ip {
-            return []msg.Service{{Host: ip}}
+            name := k.NameTemplate.RecordNameFromNameValues(nametemplate.NameValues{TypeName: "svc", ServiceName: service.ObjectMeta.Name, Namespace: service.ObjectMeta.Namespace, Zone: k.PrimaryZone()})
+            return []msg.Service{msg.Service{Host: name}}
         }
     }
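
Taken together with the Reverse path above, the flow is: extract the IPv4 address from the in-addr.arpa query name, find the service whose ClusterIP matches, and answer with that service's generated DNS name instead of the bare IP. A minimal self-contained sketch of that idea (simplified, invented types; the middleware uses its service list, NameTemplate and dnsutil.ExtractAddressFromReverse instead):

~~~
// ptr_flow_sketch.go: a simplified illustration of the PTR lookup flow added
// in this commit. The helpers here are hypothetical, for explanation only.
package main

import (
	"fmt"
	"strings"
)

type service struct {
	Name, Namespace, ClusterIP string
}

// reverseToIP turns "100.0.3.10.in-addr.arpa." into "10.3.0.100".
// The real code uses dnsutil.ExtractAddressFromReverse.
func reverseToIP(qname string) string {
	qname = strings.TrimSuffix(qname, ".in-addr.arpa.")
	octets := strings.Split(qname, ".")
	// Reverse the octet order.
	for i, j := 0, len(octets)-1; i < j; i, j = i+1, j-1 {
		octets[i], octets[j] = octets[j], octets[i]
	}
	return strings.Join(octets, ".")
}

// ptrTarget finds the service owning ip and builds its DNS name under zone,
// mirroring what getServiceRecordForIP now does via the name template.
func ptrTarget(svcs []service, ip, zone string) (string, bool) {
	for _, s := range svcs {
		if s.ClusterIP == ip {
			return fmt.Sprintf("%s.%s.svc.%s", s.Name, s.Namespace, zone), true
		}
	}
	return "", false
}

func main() {
	svcs := []service{{Name: "svc-1-a", Namespace: "test-1", ClusterIP: "10.3.0.100"}}
	ip := reverseToIP("100.0.3.10.in-addr.arpa.")
	if target, ok := ptrTarget(svcs, ip, "cluster.local"); ok {
		fmt.Println(target) // svc-1-a.test-1.svc.cluster.local
	}
}
~~~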


@@ -71,6 +71,19 @@ func kubernetesParse(c *caddy.Controller) (*Kubernetes, error) {
         return nil, errors.New("Zone name must be provided for kubernetes middleware.")
     }
 
+    k8s.primaryZone = -1
+    for i, z := range k8s.Zones {
+        if strings.HasSuffix(z, "in-addr.arpa.") || strings.HasSuffix(z, "ip6.arpa.") {
+            continue
+        }
+        k8s.primaryZone = i
+        break
+    }
+
+    if k8s.primaryZone == -1 {
+        return nil, errors.New("A non-reverse zone name must be given for Kubernetes.")
+    }
+
     for c.NextBlock() {
         switch c.Val() {
         case "template":


@@ -17,144 +17,175 @@ import (
 var dnsTestCases = []test.Case{
     {
-        Qname: "mynginx.demo.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "svc-1-a.test-1.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
+            test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.3.0.100"),
         },
     },
     {
-        Qname: "bogusservice.demo.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "bogusservice.test-1.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "mynginx.*.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "svc-1-a.*.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
+            test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.3.0.100"),
         },
     },
     {
-        Qname: "mynginx.any.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "svc-1-a.any.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
+            test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.3.0.100"),
         },
     },
     {
-        Qname: "bogusservice.*.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "bogusservice.*.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "bogusservice.any.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "bogusservice.any.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "*.demo.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "*.test-1.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
-            test.A("webserver.demo.svc.coredns.local. 1800 IN A 10.3.0.20"),
+            test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.3.0.100"),
+            test.A("svc-1-b.test-1.svc.cluster.local. 303 IN A 10.3.0.110"),
+            test.A("svc-c.test-1.svc.cluster.local. 303 IN A 10.3.0.115"),
         },
     },
     {
-        Qname: "any.demo.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "any.test-1.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
-            test.A("webserver.demo.svc.coredns.local. 1800 IN A 10.3.0.20"),
+            test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.3.0.100"),
+            test.A("svc-1-b.test-1.svc.cluster.local. 303 IN A 10.3.0.110"),
+            test.A("svc-c.test-1.svc.cluster.local. 303 IN A 10.3.0.115"),
         },
     },
     {
-        Qname: "any.test.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "any.test-2.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "*.test.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "*.test-2.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "*.*.svc.coredns.local.", Qtype: dns.TypeA,
+        Qname: "*.*.svc.cluster.local.", Qtype: dns.TypeA,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
-            test.A("webserver.demo.svc.coredns.local. 1800 IN A 10.3.0.20"),
+            test.A("svc-1-a.test-1.svc.cluster.local. 303 IN A 10.3.0.100"),
+            test.A("svc-1-b.test-1.svc.cluster.local. 303 IN A 10.3.0.110"),
+            test.A("svc-c.test-1.svc.cluster.local. 303 IN A 10.3.0.115"),
         },
     },
     //TODO: Fix below to all use test.SRV not test.A!
     {
-        Qname: "mynginx.demo.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "svc-1-a.test-1.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
+            test.SRV("_http._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_https._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 443 svc-1-a.test-1.svc.cluster.local."),
         },
     },
     {
-        Qname: "bogusservice.demo.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "bogusservice.test-1.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "mynginx.*.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "svc-1-a.*.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
+            test.SRV("_http._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_https._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 443 svc-1-a.test-1.svc.cluster.local."),
         },
     },
     {
-        Qname: "mynginx.any.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "svc-1-a.any.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
+            test.SRV("_http._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_https._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 443 svc-1-a.test-1.svc.cluster.local."),
         },
     },
     {
-        Qname: "bogusservice.*.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "bogusservice.*.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "bogusservice.any.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "bogusservice.any.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "*.demo.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "*.test-1.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
-            test.A("webserver.demo.svc.coredns.local. 1800 IN A 10.3.0.20"),
+            test.SRV("_http._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_https._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 443 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_http._tcp.svc-1-b.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-b.test-1.svc.cluster.local."),
+            test.SRV("_c-port._udp.svc-c.test-1.svc.cluster.local. 303 IN SRV 10 100 1234 svc-c.test-1.svc.cluster.local."),
         },
     },
     {
-        Qname: "any.demo.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "any.test-1.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
-            test.A("webserver.demo.svc.coredns.local. 1800 IN A 10.3.0.20"),
+            test.SRV("_http._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_https._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 443 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_http._tcp.svc-1-b.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-b.test-1.svc.cluster.local."),
+            test.SRV("_c-port._udp.svc-c.test-1.svc.cluster.local. 303 IN SRV 10 100 1234 svc-c.test-1.svc.cluster.local."),
         },
     },
     {
-        Qname: "any.test.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "any.test-2.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "*.test.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "*.test-2.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeNameError,
         Answer: []dns.RR{},
     },
     {
-        Qname: "*.*.svc.coredns.local.", Qtype: dns.TypeSRV,
+        Qname: "*.*.svc.cluster.local.", Qtype: dns.TypeSRV,
         Rcode: dns.RcodeSuccess,
         Answer: []dns.RR{
-            test.A("mynginx.demo.svc.coredns.local. 1800 IN A 10.3.0.10"),
-            test.A("webserver.demo.svc.coredns.local. 1800 IN A 10.3.0.20"),
+            test.SRV("_http._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_https._tcp.svc-1-a.test-1.svc.cluster.local. 303 IN SRV 10 100 443 svc-1-a.test-1.svc.cluster.local."),
+            test.SRV("_http._tcp.svc-1-b.test-1.svc.cluster.local. 303 IN SRV 10 100 80 svc-1-b.test-1.svc.cluster.local."),
+            test.SRV("_c-port._udp.svc-c.test-1.svc.cluster.local. 303 IN SRV 10 100 1234 svc-c.test-1.svc.cluster.local."),
+        },
+    },
+    {
+        Qname: "123.0.3.10.in-addr.arpa.", Qtype: dns.TypePTR,
+        Rcode: dns.RcodeSuccess,
+        Answer: []dns.RR{},
+    },
+    {
+        Qname: "100.0.3.10.in-addr.arpa.", Qtype: dns.TypePTR,
+        Rcode: dns.RcodeSuccess,
+        Answer: []dns.RR{
+            test.PTR("100.0.3.10.in-addr.arpa. 303 IN PTR svc-1-a.test-1.svc.cluster.local."),
+        },
+    },
+    {
+        Qname: "115.0.3.10.in-addr.arpa.", Qtype: dns.TypePTR,
+        Rcode: dns.RcodeSuccess,
+        Answer: []dns.RR{
+            test.PTR("115.0.3.10.in-addr.arpa. 303 IN PTR svc-c.test-1.svc.cluster.local."),
         },
     },
 }
@@ -176,12 +207,12 @@ func createTestServer(t *testing.T, corefile string) (*caddy.Instance, string) {
 func TestKubernetesIntegration(t *testing.T) {
     corefile :=
 `.:0 {
-    kubernetes coredns.local {
+    kubernetes cluster.local 0.3.10.in-addr.arpa {
         endpoint http://localhost:8080
-        #endpoint https://kubernetes/ admin.pem admin-key.pem ca.pem
         #endpoint https://kubernetes/
+        #tls admin.pem admin-key.pem ca.pem
         #tls k8s_auth/client2.crt k8s_auth/client2.key k8s_auth/ca2.crt
-        namespaces demo
+        namespaces test-1
 }
 `
     server, udp := createTestServer(t, corefile)
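
For context, every entry in dnsTestCases is driven the same way: send Qname/Qtype to the server started from this Corefile and compare the Rcode and Answer section. A rough sketch of that loop (simplified; the actual kubernetes_test.go uses the project's test helpers and compares full RRs rather than answer counts):

~~~
// A simplified sketch of driving a table of DNS test cases against the
// integration test server; names and the testCase type here are hypothetical.
package kubernetes_sketch

import (
	"testing"

	"github.com/miekg/dns"
)

type testCase struct {
	Qname   string
	Qtype   uint16
	Rcode   int
	Answers int // expected number of answer RRs, a simplification
}

func runCases(t *testing.T, addr string, cases []testCase) {
	c := new(dns.Client)
	for _, tc := range cases {
		m := new(dns.Msg)
		m.SetQuestion(tc.Qname, tc.Qtype)
		r, _, err := c.Exchange(m, addr)
		if err != nil {
			t.Fatalf("query %s: %v", tc.Qname, err)
		}
		if r.Rcode != tc.Rcode {
			t.Errorf("%s: got rcode %d, want %d", tc.Qname, r.Rcode, tc.Rcode)
		}
		if len(r.Answer) != tc.Answers {
			t.Errorf("%s: got %d answers, want %d", tc.Qname, len(r.Answer), tc.Answers)
		}
	}
}
~~~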