Fix k8s integration tests (#231)
* Adding debug message when starting k8s controller
* Adding work-around for timing issue in k8s integration tests
* Remove unused import
* Fix Makefile for ast package
* Increase k8s verbosity in travis
* Updating TODO list to find root cause of test issue
* go fmt cleanup
parent 8c9c4778c6
commit 2153d2defd
6 changed files with 65 additions and 46 deletions
@@ -45,4 +45,4 @@ before_script:
 script:
 - go test -tags etcd -race -bench=. ./...
 # Run kubernetes integration tests only if kubectl is available. i.e. If kubernetes was launched
-- ./contrib/kubernetes/testscripts/kubectl version && go test -tags k8s -race -bench=. -run 'TestK8sIntegration' ./test
+- ./contrib/kubernetes/testscripts/kubectl version && go test -v -tags k8s -race -bench=. -run 'TestK8sIntegration' ./test
Makefile

@@ -26,6 +26,7 @@ docker: deps
 
 .PHONY: generate
 generate: ../../mholt/caddy
+	go get $(BUILD_VERBOSE) golang.org/x/tools/go/ast/astutil
 	go generate $(BUILD_VERSOSE)
 
 .PHONY: deps
@@ -310,3 +310,7 @@ TBD:
   pre-loaded k8s API cache. With and without CoreDNS response caching.
 * Try to get rid of kubernetes launch scripts by moving operations into
   .travis.yml file.
+* Find root cause of timing condition that results in no data returned to
+  test client when running k8s integration tests. Current work-around is a
+  nasty hack of waiting 5 seconds after setting up test server before performing
+  client calls. (See hack in test/kubernetes_test.go)
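A less fragile work-around than the fixed five-second sleep would be to poll the test server until it starts returning data. The sketch below is hypothetical and not part of this commit; the helper name, timeout, and polling interval are assumptions, and it reuses the same github.com/miekg/dns client the tests already depend on.

package test

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

// waitForServer is a hypothetical helper: instead of sleeping a fixed 5 seconds,
// it repeatedly queries the CoreDNS test instance at udpAddr until an answer
// comes back or the timeout expires.
func waitForServer(udpAddr, qname string, timeout time.Duration) error {
	client := new(dns.Client)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		msg := new(dns.Msg)
		msg.SetQuestion(qname, dns.TypeA)
		res, _, err := client.Exchange(msg, udpAddr)
		if err == nil && res != nil && len(res.Answer) > 0 {
			return nil // server is up and answering with k8s data
		}
		time.Sleep(250 * time.Millisecond)
	}
	return fmt.Errorf("no DNS data from %s within %s", udpAddr, timeout)
}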
@@ -2,6 +2,7 @@ package kubernetes
 
 import (
 	"fmt"
 	"log"
 	"sync"
 	"time"
 
@@ -149,6 +150,7 @@ func (dns *dnsController) Stop() error {
 
 // Run starts the controller.
 func (dns *dnsController) Run() {
+	log.Printf("[debug] starting k8s controller: %s\n", dns)
 	go dns.endpController.Run(dns.stopCh)
 	go dns.svcController.Run(dns.stopCh)
 	go dns.nsController.Run(dns.stopCh)
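Since Run only starts the informer goroutines and returns, one possible contributor to the timing condition noted in the TODO list is that the first queries arrive before the controllers have listed anything from the API server. The fragment below is a sketch only, not part of this commit: it assumes endpController, svcController and nsController expose HasSynced(), as the kubernetes framework controllers of this era do, and the method name and polling interval are hypothetical.

// waitForSync is a hypothetical addition: it blocks until all three informer
// controllers report that their initial list has been cached, assuming each
// exposes HasSynced(). Requires the "time" import in this file.
func (dns *dnsController) waitForSync() {
	for !(dns.endpController.HasSynced() &&
		dns.svcController.HasSynced() &&
		dns.nsController.HasSynced()) {
		time.Sleep(100 * time.Millisecond)
	}
}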
@@ -35,13 +35,13 @@ type Kubernetes struct {
 	Selector *labels.Selector
 }
 
-func (g *Kubernetes) InitKubeCache() error {
+func (k *Kubernetes) InitKubeCache() error {
 	// For a custom api server or running outside a k8s cluster
 	// set URL in env.KUBERNETES_MASTER or set endpoint in Corefile
 	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
 	overrides := &clientcmd.ConfigOverrides{}
-	if len(g.APIEndpoint) > 0 {
-		overrides.ClusterInfo = clientcmdapi.Cluster{Server: g.APIEndpoint}
+	if len(k.APIEndpoint) > 0 {
+		overrides.ClusterInfo = clientcmdapi.Cluster{Server: k.APIEndpoint}
 	}
 	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
 	config, err := clientConfig.ClientConfig()
@@ -54,19 +54,21 @@ func (g *Kubernetes) InitKubeCache() error {
 		log.Printf("[ERROR] Failed to create kubernetes notification controller: %v", err)
 		return err
 	}
-	if g.LabelSelector == nil {
+	if k.LabelSelector == nil {
 		log.Printf("[INFO] Kubernetes middleware configured without a label selector. No label-based filtering will be performed.")
 	} else {
 		var selector labels.Selector
-		selector, err = unversionedapi.LabelSelectorAsSelector(g.LabelSelector)
-		g.Selector = &selector
+		selector, err = unversionedapi.LabelSelectorAsSelector(k.LabelSelector)
+		k.Selector = &selector
 		if err != nil {
-			log.Printf("[ERROR] Unable to create Selector for LabelSelector '%s'.Error was: %s", g.LabelSelector, err)
+			log.Printf("[ERROR] Unable to create Selector for LabelSelector '%s'.Error was: %s", k.LabelSelector, err)
 			return err
 		}
-		log.Printf("[INFO] Kubernetes middleware configured with the label selector '%s'. Only kubernetes objects matching this label selector will be exposed.", unversionedapi.FormatLabelSelector(g.LabelSelector))
+		log.Printf("[INFO] Kubernetes middleware configured with the label selector '%s'. Only kubernetes objects matching this label selector will be exposed.", unversionedapi.FormatLabelSelector(k.LabelSelector))
 	}
-	g.APIConn = newdnsController(kubeClient, g.ResyncPeriod, g.Selector)
+	k.APIConn = newdnsController(kubeClient, k.ResyncPeriod, k.Selector)
 
+	log.Printf("[debug] k8s controller initialized: %s\n", k)
+
 	return err
 }
@@ -76,11 +78,11 @@ func (g *Kubernetes) InitKubeCache() error {
 // For example, if "coredns.local" is a zone configured for the
 // Kubernetes middleware, then getZoneForName("a.b.coredns.local")
 // will return ("coredns.local", ["a", "b"]).
-func (g *Kubernetes) getZoneForName(name string) (string, []string) {
+func (k *Kubernetes) getZoneForName(name string) (string, []string) {
 	var zone string
 	var serviceSegments []string
 
-	for _, z := range g.Zones {
+	for _, z := range k.Zones {
 		if dns.IsSubDomain(z, name) {
 			zone = z
@@ -97,12 +99,12 @@ func (g *Kubernetes) getZoneForName(name string) (string, []string) {
 // If exact is true, it will lookup just
 // this name. This is used when find matches when completing SRV lookups
 // for instance.
-func (g *Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
+func (k *Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
 	// TODO: refector this.
 	// Right now GetNamespaceFromSegmentArray do not supports PRE queries
 	if strings.HasSuffix(name, arpaSuffix) {
 		ip, _ := extractIP(name)
-		records := g.getServiceRecordForIP(ip, name)
+		records := k.getServiceRecordForIP(ip, name)
 		return records, nil
 	}
 	var (
@@ -111,14 +113,14 @@ func (g *Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
 		typeName    string
 	)
 
-	zone, serviceSegments := g.getZoneForName(name)
+	zone, serviceSegments := k.getZoneForName(name)
 
 	// TODO: Implementation above globbed together segments for the serviceName if
 	// multiple segments remained. Determine how to do similar globbing using
 	// the template-based implementation.
-	namespace = g.NameTemplate.GetNamespaceFromSegmentArray(serviceSegments)
-	serviceName = g.NameTemplate.GetServiceFromSegmentArray(serviceSegments)
-	typeName = g.NameTemplate.GetTypeFromSegmentArray(serviceSegments)
+	namespace = k.NameTemplate.GetNamespaceFromSegmentArray(serviceSegments)
+	serviceName = k.NameTemplate.GetServiceFromSegmentArray(serviceSegments)
+	typeName = k.NameTemplate.GetTypeFromSegmentArray(serviceSegments)
 
 	if namespace == "" {
 		err := errors.New("Parsing query string did not produce a namespace value. Assuming wildcard namespace.")
@@ -137,12 +139,11 @@ func (g *Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
 
 	// Abort if the namespace does not contain a wildcard, and namespace is not published per CoreFile
 	// Case where namespace contains a wildcard is handled in Get(...) method.
-	if (!nsWildcard) && (len(g.Namespaces) > 0) && (!util.StringInSlice(namespace, g.Namespaces)) {
+	if (!nsWildcard) && (len(k.Namespaces) > 0) && (!util.StringInSlice(namespace, k.Namespaces)) {
 		return nil, nil
 	}
 
-	log.Printf("[debug] before g.Get(namespace, nsWildcard, serviceName, serviceWildcard): %v %v %v %v", namespace, nsWildcard, serviceName, serviceWildcard)
-	k8sItems, err := g.Get(namespace, nsWildcard, serviceName, serviceWildcard)
+	k8sItems, err := k.Get(namespace, nsWildcard, serviceName, serviceWildcard)
 	if err != nil {
 		return nil, err
 	}
@@ -151,12 +152,12 @@ func (g *Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
 		return nil, nil
 	}
 
-	records := g.getRecordsForServiceItems(k8sItems, nametemplate.NameValues{TypeName: typeName, ServiceName: serviceName, Namespace: namespace, Zone: zone})
+	records := k.getRecordsForServiceItems(k8sItems, nametemplate.NameValues{TypeName: typeName, ServiceName: serviceName, Namespace: namespace, Zone: zone})
 	return records, nil
 }
 
 // TODO: assemble name from parts found in k8s data based on name template rather than reusing query string
-func (g *Kubernetes) getRecordsForServiceItems(serviceItems []api.Service, values nametemplate.NameValues) []msg.Service {
+func (k *Kubernetes) getRecordsForServiceItems(serviceItems []api.Service, values nametemplate.NameValues) []msg.Service {
 	var records []msg.Service
 
 	for _, item := range serviceItems {
@@ -179,8 +180,8 @@ func (g *Kubernetes) getRecordsForServiceItems(serviceItems []api.Service, value
 }
 
 // Get performs the call to the Kubernetes http API.
-func (g *Kubernetes) Get(namespace string, nsWildcard bool, servicename string, serviceWildcard bool) ([]api.Service, error) {
-	serviceList := g.APIConn.GetServiceList()
+func (k *Kubernetes) Get(namespace string, nsWildcard bool, servicename string, serviceWildcard bool) ([]api.Service, error) {
+	serviceList := k.APIConn.GetServiceList()
 
 	var resultItems []api.Service
 
@@ -188,7 +189,7 @@ func (g *Kubernetes) Get(namespace string, nsWildcard bool, servicename string,
 		if symbolMatches(namespace, item.Namespace, nsWildcard) && symbolMatches(servicename, item.Name, serviceWildcard) {
 			// If namespace has a wildcard, filter results against Corefile namespace list.
 			// (Namespaces without a wildcard were filtered before the call to this function.)
-			if nsWildcard && (len(g.Namespaces) > 0) && (!util.StringInSlice(item.Namespace, g.Namespaces)) {
+			if nsWildcard && (len(k.Namespaces) > 0) && (!util.StringInSlice(item.Namespace, k.Namespaces)) {
 				continue
 			}
 			resultItems = append(resultItems, item)
@@ -216,8 +217,8 @@ func isKubernetesNameError(err error) bool {
 	return false
 }
 
-func (g *Kubernetes) getServiceRecordForIP(ip, name string) []msg.Service {
-	svcList, err := g.APIConn.svcLister.List()
+func (k *Kubernetes) getServiceRecordForIP(ip, name string) []msg.Service {
+	svcList, err := k.APIConn.svcLister.List()
 	if err != nil {
 		return nil
 	}
@@ -6,7 +6,9 @@ import (
 	"io/ioutil"
 	"log"
 	"testing"
+	"time"
 
+	"github.com/mholt/caddy"
 	"github.com/miekg/dns"
 )
@@ -61,21 +63,15 @@ var testdataLookupSRV = []struct {
 	{"*.*.coredns.local.", 1, 1}, // One SRV record, via namespace and service wildcard
 }
 
-func testK8sIntegration(t *testing.T) {
+func TestK8sIntegration(t *testing.T) {
+
 	// t.Skip("Skip Kubernetes Integration tests")
 	// subtests here (Go 1.7 feature).
 	testLookupA(t)
 	testLookupSRV(t)
 }
 
-func testLookupA(t *testing.T) {
-	corefile :=
-		`.:0 {
-    kubernetes coredns.local {
-        endpoint http://localhost:8080
-        namespaces demo
-    }
-`
+func createTestServer(t *testing.T, corefile string) (*caddy.Instance, string) {
 	server, err := CoreDNSServer(corefile)
 	if err != nil {
 		t.Fatalf("could not get CoreDNS serving instance: %s", err)
@@ -85,10 +81,28 @@ func testLookupA(t *testing.T) {
 	if udp == "" {
 		t.Fatalf("could not get udp listening port")
 	}
+
+	return server, udp
+}
+
+func testLookupA(t *testing.T) {
+	corefile :=
+		`.:0 {
+    kubernetes coredns.local {
+        endpoint http://localhost:8080
+        namespaces demo
+    }
+
+`
+	server, udp := createTestServer(t, corefile)
 	defer server.Stop()
 
 	log.SetOutput(ioutil.Discard)
 
+	// Work-around for timing condition that results in no-data being returned in
+	// test environment.
+	time.Sleep(5 * time.Second)
+
 	for _, testData := range testdataLookupA {
 		dnsClient := new(dns.Client)
 		dnsMessage := new(dns.Msg)
@@ -126,18 +140,15 @@ func testLookupSRV(t *testing.T) {
 	}
 `
 
-	server, err := CoreDNSServer(corefile)
-	if err != nil {
-		t.Fatalf("could not get CoreDNS serving instance: %s", err)
-	}
-	udp, _ := CoreDNSServerPorts(server, 0)
-	if udp == "" {
-		t.Fatalf("could not get udp listening port")
-	}
+	server, udp := createTestServer(t, corefile)
 	defer server.Stop()
 
 	log.SetOutput(ioutil.Discard)
 
+	// Work-around for timing condition that results in no-data being returned in
+	// test environment.
+	time.Sleep(5 * time.Second)
+
 	// TODO: Add checks for A records in additional section
 
 	for _, testData := range testdataLookupSRV {