Merge pull request #611 from rmohr/providers
Introduce interface to support different envs
stu-gott committed Jan 5, 2018
2 parents 4dd94f6 + 4f615c1 commit 8606c9e
Showing 36 changed files with 448 additions and 555 deletions.
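
At the heart of the change is a small provider contract: cluster/up.sh, cluster/down.sh, cluster/build.sh, cluster/deploy.sh and cluster/kubectl.sh no longer drive Vagrant directly but source cluster/$PROVIDER/provider.sh and call into it. Judging by the two providers added below (vagrant and local), a new environment only has to implement a handful of shell functions. A minimal sketch, using a hypothetical "example" provider name:

    #!/bin/bash
    # cluster/example/provider.sh -- hypothetical provider skeleton

    function up() {
        # boot the cluster, fetch kubectl credentials,
        # then drop connection details for the other scripts
        prepare_config
    }

    function down() {
        # tear the cluster down again
        :
    }

    function build() {
        # build KubeVirt manifests and images for this cluster
        make manifests docker
    }

    function _kubectl() {
        # run kubectl against this cluster
        kubectl --kubeconfig=cluster/example/.kubeconfig "$@"
    }

    function prepare_config() {
        # write the fragment the hack/ scripts read back in
        cat >hack/config-provider-example.sh <<EOF
    master_ip=127.0.0.1
    docker_tag=devel
    kubeconfig=cluster/example/.kubeconfig
    EOF
    }
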
7 changes: 7 additions & 0 deletions .gitignore
@@ -28,3 +28,10 @@ tags
hack/gen-swagger-doc/*.adoc
hack/gen-swagger-doc/*.md
hack/gen-swagger-doc/html5
hack/config-provider-local.sh
hack/config-provider-vagrant.sh
cluster/local/certs
**.swp
**.pem
**.crt
**.csr
5 changes: 3 additions & 2 deletions .travis.yml
@@ -20,6 +20,7 @@ install:
- go get github.com/mattn/goveralls
- go get -u github.com/Masterminds/glide
- go get golang.org/x/tools/cmd/goimports
- go get -u mvdan.cc/sh/cmd/shfmt
- go get -u github.com/golang/mock/gomock
- go get -u github.com/rmohr/mock/mockgen
- go get -u github.com/rmohr/go-swagger-utils/swagger-doc
@@ -30,9 +31,9 @@ install:
- make sync

script:
- make fmt
- make fmt fmt-bash
- if git diff --name-only | grep '.*.go'; then echo "It seems like you did not run `make fmt`. Please run it and commit the changes"; false; fi
- if git diff --name-only | grep '.*.go'; then echo "It seems like you did not run `make fmt fmt-bash`. Please run it and commit the changes"; false; fi
- make generate
- make fmt
- if git diff --name-only | grep 'generated.*.go'; then echo "Content of generated
32 changes: 18 additions & 14 deletions Makefile
@@ -35,6 +35,9 @@ vet:
fmt:
goimports -w -local kubevirt.io cmd/ pkg/ tests/

fmt-bash:
shfmt -i 4 -w cluster/ hack/ images/

test: build
./hack/build-go.sh test ${WHAT}

@@ -66,7 +69,6 @@ checksync:
sync:
glide install --strip-vendor
${HASH} glide.lock > .glide.lock.hash


docker: build
./hack/build-docker.sh build ${WHAT}
@@ -83,22 +85,24 @@ check: check-bash vet
check-bash:
find . -name \*.sh -exec bash -n \{\} \;

vagrant-sync-config:
./cluster/vagrant/sync_config.sh

vagrant-sync-build: build
./cluster/vagrant/sync_build.sh

vagrant-sync-optional:
./cluster/vagrant/sync_build.sh 'build optional'

vagrant-deploy: vagrant-sync-config vagrant-sync-build
export KUBECTL="cluster/kubectl.sh" && ./cluster/deploy.sh

.release-functest:
make functest > .release-functest 2>&1

release-announce: .release-functest
./hack/release-announce.sh $(RELREF) $(PREREF)

.PHONY: build fmt test clean distclean checksync sync docker manifests vet publish vagrant-sync-config vagrant-sync-build vagrant-deploy functest release-announce
cluster-up:
./cluster/up.sh

cluster-down:
./cluster/down.sh

cluster-build:
./cluster/build.sh

cluster-deploy:
./cluster/deploy.sh

cluster-sync: cluster-build cluster-deploy

.PHONY: build fmt test clean distclean checksync sync docker manifests vet publish functest release-announce fmt-bash cluster-up cluster-down cluster-deploy cluster-sync
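
Taken together, the Vagrant-specific targets are replaced by provider-neutral cluster-* targets, so a typical development loop now looks roughly like this (vagrant is the default whenever $PROVIDER is unset):

    export PROVIDER=vagrant   # or: export PROVIDER=local
    make cluster-up           # boot the environment via the provider's up()
    make cluster-sync         # cluster-build followed by cluster-deploy
    make cluster-down         # shut it down again via down()

The new fmt-bash target formats all shell sources with shfmt at a four-space indent, which is what CI now enforces via make fmt fmt-bash.
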
8 changes: 8 additions & 0 deletions Vagrantfile
@@ -1,6 +1,14 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

if ARGV.first == "up" && ENV['USING_KUBE_SCRIPTS'] != 'true'
raise Vagrant::Errors::VagrantError.new, <<END
Calling 'vagrant up' directly is not supported. Instead, please run the following:
export PROVIDER=vagrant
make cluster-up
END
end

$use_nfs = ENV['VAGRANT_USE_NFS'] == 'true'
$use_rng = ENV['VAGRANT_USE_RNG'] == 'true'
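
With this guard, the boxes can only be booted through the make entry point, which sets USING_KUBE_SCRIPTS=true; a direct invocation now aborts with an error along these lines (exact rendering depends on Vagrant):

    $ vagrant up
    Calling 'vagrant up' directly is not supported. Instead, please run the following:
    export PROVIDER=vagrant
    make cluster-up
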
13 changes: 5 additions & 8 deletions automation/test.sh
@@ -57,16 +57,16 @@ curl -LO https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSI
export VAGRANT_DOTFILE_PATH="${VAGRANT_DOTFILE_PATH:-$WORKSPACE/.vagrant}"

# Make sure that the VM is properly shut down on exit
trap '{ vagrant halt; }' EXIT
trap '{ make cluster-down; }' EXIT

set +e

# TODO handle complete workspace removal on CI
vagrant up --provider=libvirt
make cluster-up
if [ $? -ne 0 ]; then
vagrant destroy
set -e
vagrant up --provider=libvirt
make cluster-up
fi
set -e

@@ -75,9 +75,6 @@ go get golang.org/x/tools/cmd/goimports
go get -u github.com/Masterminds/glide
make

# Copy connection details for kubernetes
cluster/kubectl.sh --init

# Make sure we can connect to kubernetes
export APISERVER=$(cat cluster/vagrant/.kubeconfig | grep server | sed -e 's# \+server: https://##' | sed -e 's/\r//')
$WORKSPACE/dockerize -wait tcp://$APISERVER -timeout 300s
@@ -136,9 +133,9 @@ for i in ${namespaces[@]}; do
done

if [ -z "$TARGET" ] || [ "$TARGET" = "vagrant-dev" ]; then
cluster/sync.sh
make cluster-sync
elif [ "$TARGET" = "vagrant-release" ]; then
cluster/sync.sh
make cluster-sync
fi

# Wait until kubevirt pods are running
6 changes: 5 additions & 1 deletion cluster/sync.sh → cluster/build.sh
@@ -20,4 +20,8 @@
# This logic moved into the Makefile.
# We're leaving this file around for people who still reference this
# specific script in their development workflow.
make vagrant-deploy

PROVIDER=${PROVIDER:-vagrant}
source cluster/$PROVIDER/provider.sh

build
23 changes: 12 additions & 11 deletions cluster/deploy.sh
@@ -19,34 +19,35 @@

set -ex

KUBECTL=${KUBECTL:-kubectl}
PROVIDER=${PROVIDER:-vagrant}

source cluster/$PROVIDER/provider.sh
source hack/config.sh

echo "Cleaning up ..."
# Work around https://github.com/kubernetes/kubernetes/issues/33517
$KUBECTL delete ds -l "kubevirt.io" -n kube-system --cascade=false --grace-period 0 2>/dev/null || :
$KUBECTL delete pods -n kube-system -l="kubevirt.io=libvirt" --force --grace-period 0 2>/dev/null || :
$KUBECTL delete pods -n kube-system -l="kubevirt.io=virt-handler" --force --grace-period 0 2>/dev/null || :
_kubectl delete ds -l "kubevirt.io" -n kube-system --cascade=false --grace-period 0 2>/dev/null || :
_kubectl delete pods -n kube-system -l="kubevirt.io=libvirt" --force --grace-period 0 2>/dev/null || :
_kubectl delete pods -n kube-system -l="kubevirt.io=virt-handler" --force --grace-period 0 2>/dev/null || :

# Delete everything, no matter if release, devel or infra
$KUBECTL delete -f manifests -R --grace-period 1 2>/dev/null || :
_kubectl delete -f manifests -R --grace-period 1 2>/dev/null || :

# Delete exposures
$KUBECTL delete services -l "kubevirt.io" -n kube-system
_kubectl delete services -l "kubevirt.io" -n kube-system

sleep 2

echo "Deploying ..."

# Deploy the right manifests for the right target
if [ -z "$TARGET" ] || [ "$TARGET" = "vagrant-dev" ]; then
$KUBECTL create -f manifests/dev -R $i
elif [ "$TARGET" = "vagrant-release" ]; then
$KUBECTL create -f manifests/release -R $i
if [ -z "$TARGET" ] || [ "$TARGET" = "vagrant-dev" ]; then
_kubectl create -f manifests/dev -R $i
elif [ "$TARGET" = "vagrant-release" ]; then
_kubectl create -f manifests/release -R $i
fi

# Deploy additional infra for testing
$KUBECTL create -f manifests/testing -R $i
_kubectl create -f manifests/testing -R $i

echo "Done"
5 changes: 5 additions & 0 deletions cluster/down.sh
@@ -0,0 +1,5 @@
#!/bin/bash

PROVIDER=${PROVIDER:-vagrant}
source cluster/$PROVIDER/provider.sh
down
22 changes: 5 additions & 17 deletions cluster/kubectl.sh
@@ -17,24 +17,12 @@
# Copyright 2017 Red Hat, Inc.
#

PROVIDER=${PROVIDER:-vagrant}
source ${KUBEVIRT_PATH}cluster/$PROVIDER/provider.sh
source ${KUBEVIRT_PATH}hack/config.sh

SYNC_CONFIG=${KUBEVIRT_PATH}cluster/vagrant/sync_config.sh

if [ "$1" == "--init" ]
then
exec $SYNC_CONFIG
exit
fi

# Print usage from virtctl and kubectl
if [ "$1" == "--help" ] || [ "$1" == "-h" ] ; then
cmd/virtctl/virtctl "$@"
fi

if [ -e ${KUBEVIRT_PATH}cluster/vagrant/.kubeconfig ] &&
[ -e ${KUBEVIRT_PATH}cluster/vagrant/.kubectl ]; then
${KUBEVIRT_PATH}cluster/vagrant/.kubectl --kubeconfig=${KUBEVIRT_PATH}cluster/vagrant/.kubeconfig "$@"
if [ "$1" == "console" ] || [ "$1" == "vnc" ]; then
cmd/virtctl/virtctl "$@" --kubeconfig=${kubeconfig}
else
echo "Did you already run '$SYNC_CONFIG' to deploy kubevirt?"
_kubectl "$@"
fi
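
The wrapper thus routes console and vnc through virtctl, passing along the provider's kubeconfig, and hands everything else to the provider's _kubectl. For example (the VM name testvm is hypothetical):

    cluster/kubectl.sh get pods -n kube-system   # forwarded to _kubectl
    cluster/kubectl.sh console testvm            # handled by cmd/virtctl/virtctl
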
53 changes: 53 additions & 0 deletions cluster/local/provider.sh
@@ -0,0 +1,53 @@
#!/bin/bash

function _cert_dir() {
echo $GOPATH/src/kubevirt.io/kubevirt/cluster/local/certs
}

function _main_ip() {
ip -o -4 a | tr -s ' ' | cut -d' ' -f 2,4 |
grep -v -e '^lo[0-9:]*' | head -1 |
cut -d' ' -f 2 | cut -d'/' -f1
}

function up() {
# Make sure that local config is correct
prepare_config

go get -d k8s.io/kubernetes

export API_HOST_IP=$(_main_ip)
export KUBELET_HOST=$(_main_ip)
export HOSTNAME_OVERRIDE=kubdev
export ALLOW_PRIVILEGED=1
export ALLOW_SECURITY_CONTEXT=1
export KUBE_DNS_DOMAIN="cluster.local"
export KUBE_DNS_SERVER_IP="10.0.0.10"
export KUBE_ENABLE_CLUSTER_DNS=true
export CERT_DIR=$(_cert_dir)
(
cd $GOPATH/src/k8s.io/kubernetes
./hack/local-up-cluster.sh
)
}

function prepare_config() {
cat >hack/config-provider-local.sh <<EOF
master_ip=$(_main_ip)
docker_tag=devel
kubeconfig=$(_cert_dir)/admin.kubeconfig
EOF
}

function build() {
make manifests docker
}

function _kubectl() {
export KUBECONFIG=$(_cert_dir)/admin.kubeconfig
$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh "$@"
}

function down() {
echo "Not supported by this provider. Please kill the running script manually."
}
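
The local provider drives Kubernetes' own hack/local-up-cluster.sh from a checkout under $GOPATH/src/k8s.io/kubernetes; note that down() is deliberately a no-op, so the cluster is stopped by killing that script by hand. Bringing it up looks like:

    export PROVIDER=local
    make cluster-up    # ends up in k8s.io/kubernetes/hack/local-up-cluster.sh
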
5 changes: 5 additions & 0 deletions cluster/up.sh
@@ -0,0 +1,5 @@
#!/bin/bash

PROVIDER=${PROVIDER:-vagrant}
source cluster/$PROVIDER/provider.sh
up
53 changes: 53 additions & 0 deletions cluster/vagrant/provider.sh
@@ -0,0 +1,53 @@
#!/bin/bash

function _main_ip() {
echo 192.168.200.2
}

function up() {
export USING_KUBE_SCRIPTS=true
# Make sure that the vagrant environment is up and running
vagrant up --provider=libvirt
# Synchronize kubectl config
vagrant ssh-config master 2>&1 | grep "not yet ready for SSH" >/dev/null &&
{
echo "Master node is not up"
exit 1
}

OPTIONS=$(vagrant ssh-config master | grep -v '^Host ' | awk -v ORS=' ' 'NF{print "-o " $1 "=" $2}')

scp $OPTIONS master:/usr/bin/kubectl ${KUBEVIRT_PATH}cluster/vagrant/.kubectl
chmod u+x cluster/vagrant/.kubectl

vagrant ssh master -c "sudo cat /etc/kubernetes/admin.conf" >${KUBEVIRT_PATH}cluster/vagrant/.kubeconfig

# Make sure that local config is correct
prepare_config
}

function prepare_config() {
BASE_PATH=${KUBEVIRT_PATH:-$PWD}
cat >hack/config-provider-vagrant.sh <<EOF
master_ip=$(_main_ip)
docker_tag=devel
kubeconfig=${BASE_PATH}/cluster/vagrant/.kubeconfig
EOF
}

function build() {
make build manifests
for VM in $(vagrant status | grep -v "^The Libvirt domain is running." | grep running | cut -d " " -f1); do
vagrant rsync $VM # if you do not use NFS
vagrant ssh $VM -c "cd /vagrant && export DOCKER_TAG=${docker_tag} && sudo -E hack/build-docker.sh build && sudo -E hack/build-docker.sh build optional"
done
}

function _kubectl() {
export KUBECONFIG=${KUBEVIRT_PATH}cluster/vagrant/.kubeconfig
${KUBEVIRT_PATH}cluster/vagrant/.kubectl "$@"
}

function down() {
vagrant halt
}
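
For the Vagrant provider, prepare_config writes a fragment like the one below (the checkout path is illustrative), which the hack/ scripts can read back in; both generated config-provider-*.sh files are covered by the .gitignore hunk above:

    # hack/config-provider-vagrant.sh (generated)
    master_ip=192.168.200.2
    docker_tag=devel
    kubeconfig=/path/to/kubevirt/cluster/vagrant/.kubeconfig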
