Fix Travis IPv6 and add k8s integration testing to CI (#194)

Update the Travis YAML file to:
* Force IPv6 to work in the Travis VM environment
* Enable docker (requires VM environment and sudo)
* Run kubernetes integration tests in Travis
Michael Richmond 2016-07-27 10:01:24 -07:00 committed by GitHub
parent 4a3b57d81b
commit 8dec292668
8 changed files with 54 additions and 13 deletions


@@ -1,12 +1,39 @@
+sudo: required
+# Trusty distribution is much faster when sudo is required
+dist: trusty
+services:
+  - docker
 language: go
 go:
   - 1.5
   - 1.6
+# In the Travis VM-based build environment, IPv6 networking is not
+# enabled by default. The sysctl operations below enable IPv6.
+# IPv6 is needed by some of the CoreDNS test cases. The VM environment
+# is needed to have access to sudo in the test environment. Sudo is
+# needed to have docker in the test environment. Docker is needed to
+# launch a kubernetes instance in the test environment.
+# (Dependencies are fun! :) )
+before_install:
+  - cat /proc/net/if_inet6
+  - uname -a
+  - sudo bash -c 'if [ `cat /proc/net/if_inet6 | wc -l` = "0" ]; then echo "Enabling IPv6" ; sysctl net.ipv6.conf.all.disable_ipv6=0 ; sysctl net.ipv6.conf.default.disable_ipv6=0 ; sysctl net.ipv6.conf.lo.disable_ipv6=0 ; fi'
+  - cat /proc/net/if_inet6
+  - env
 before_script:
   - curl -L https://github.com/coreos/etcd/releases/download/v2.3.1/etcd-v2.3.1-linux-amd64.tar.gz -o etcd-v2.3.1-linux-amd64.tar.gz
   - tar xzvf etcd-v2.3.1-linux-amd64.tar.gz
   - ./etcd-v2.3.1-linux-amd64/etcd &
   - go get
   - go get github.com/coreos/go-etcd/etcd
+  - if which docker &>/dev/null ; then docker pull gcr.io/google_containers/hyperkube-amd64:v1.2.4 ; docker ps -a ; fi
+  - pwd
+  - if which docker &>/dev/null ; then ./middleware/kubernetes/test/00_run_k8s.sh && ./middleware/kubernetes/test/10_setup_kubectl.sh && ./middleware/kubernetes/test/20_setup_k8s_services.sh ; docker ps -a ; fi
 script:
   - go test -tags etcd -race -bench=. ./...
   - ./middleware/kubernetes/test/kubectl version && go test -tags k8s -race -bench=. -run 'TestK8sIntegration' ./test
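
For reference, the kubernetes-related CI steps above can be approximated locally along these lines (a sketch only; it assumes Docker is installed and the commands are run from the repository root, mirroring the before_script and script entries):

    # Start a local kubernetes (hyperkube) instance, then set up kubectl and the test services.
    ./middleware/kubernetes/test/00_run_k8s.sh
    ./middleware/kubernetes/test/10_setup_kubectl.sh
    ./middleware/kubernetes/test/20_setup_k8s_services.sh
    # Run the kubernetes integration tests, gated by the k8s build tag.
    go test -tags k8s -race -bench=. -run 'TestK8sIntegration' ./test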


@@ -22,8 +22,7 @@ test:
 .PHONY: testk8s
 testk8s:
-	# go test $(TEST_VERBOSE) -tags=k8sIntegration ./...
-	go test $(TEST_VERBOSE) -tags=k8sIntegration -run 'TestK8sIntegration' ./test
+	go test $(TEST_VERBOSE) -tags=k8s -run 'TestK8sIntegration' ./test
 .PHONY: clean
 clean:
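
For a quick local check of the retagged target, usage might look like this (a sketch; it assumes a kubernetes test instance is already running, e.g. via the scripts wired into .travis.yml above, and that TEST_VERBOSE is either empty or extra flags the recipe passes straight to go test):

    make testk8s
    # roughly equivalent to:
    go test -tags=k8s -run 'TestK8sIntegration' ./test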


@@ -102,7 +102,7 @@ kubectl to communicate with kubernetes running on the localhost:
 ~~~
 #!/bin/bash
-BASEDIR=`realpath $(dirname ${0})`
+BASEDIR=`readlink -e $(dirname ${0})`
 ${BASEDIR}/kubectl config set-cluster test-doc --server=http://localhost:8080
 ${BASEDIR}/kubectl config set-context test-doc --cluster=test-doc
@@ -343,12 +343,14 @@ TBD:
 * Do we need to generate synthetic zone records for namespaces?
 * Do we need to generate synthetic zone records for the skydns synthetic zones?
 * Test cases
-  * ~~Implement test cases for http data parsing using dependency injection
-    for http get operations.~~
   * Test with CoreDNS caching. CoreDNS caching for DNS response is working
     using the `cache` directive. Tested working using 20s cache timeout
     and A-record queries. Automate testing with cache in place.
   * Automate CoreDNS performance tests. Initially for zone files, and for
     pre-loaded k8s API cache.
-  * Automate integration testing with kubernetes. (k8s launch and service start-up
-    automation is in middleware/kubernetes/tests)
+  * Try to get rid of kubernetes launch scripts by moving operations into
+    .travis.yml file.
+  * ~~Implement test cases for http data parsing using dependency injection
+    for http get operations.~~
+  * ~~Automate integration testing with kubernetes. (k8s launch and service start-up
+    automation is in middleware/kubernetes/tests)~~


@@ -7,7 +7,6 @@ K8S_VERSION="v1.2.4"
 ARCH="amd64"
 export K8S_VERSION
 export ARCH


@@ -1,7 +1,7 @@
 #!/bin/bash
 PWD=`pwd`
-BASEDIR=`realpath $(dirname ${0})`
+BASEDIR=`readlink -e $(dirname ${0})`
 cd ${BASEDIR}
 if [ ! -e kubectl ]; then


@@ -2,6 +2,11 @@
 # Running skydns based on instructions at: https://testdatamanagement.wordpress.com/2015/09/01/running-kubernetes-in-docker-with-dns-on-a-single-node/
+PWD=`pwd`
+BASEDIR=`readlink -e $(dirname ${0})`
+cd ${BASEDIR}
 KUBECTL='./kubectl'
 #RUN_SKYDNS="yes"
@@ -15,7 +20,7 @@ wait_until_k8s_ready() {
         if [ "${?}" = "0" ]; then
             break
         else
-            echo "sleeping for 5 seconds"
+            echo "sleeping for 5 seconds (waiting for kubernetes to start)"
             sleep 5
         fi
     done
@@ -37,3 +42,5 @@ if [ "${RUN_SKYDNS}" = "yes" ]; then
 else
     true
 fi
+cd ${PWD}


@@ -1,16 +1,21 @@
 #!/bin/bash
+PWD=`pwd`
+BASEDIR=`readlink -e $(dirname ${0})`
+cd ${BASEDIR}
 KUBECTL='./kubectl'
 wait_until_k8s_ready() {
     # Wait until kubernetes is up and fully responsive
     while :
     do
         ${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
         if [ "${?}" = "0" ]; then
             break
         else
-            echo "sleeping for 5 seconds"
+            echo "sleeping for 5 seconds (waiting for kubernetes to start)"
             sleep 5
         fi
     done
@@ -78,3 +83,5 @@ run_and_expose_service webserver test nginx 80
 echo ""
 echo "Services exposed:"
 ${KUBECTL} get services --all-namespaces
+cd ${PWD}


@@ -1,4 +1,4 @@
-// +build k8sIntegration
+// +build k8s
 
 package test
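
With the renamed tag, the integration test file is only compiled when the k8s tag is supplied, which is what the Makefile target and the Travis script entry now do; for illustration (commands assume the repository root as the working directory):

    go test ./test                                      # k8s-tagged files are excluded from the build
    go test -tags k8s -run 'TestK8sIntegration' ./test  # includes them, as CI and make testk8s now run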